From 743db567ddb1119124276fe8ad0c1fbe4e1147bb Mon Sep 17 00:00:00 2001
From: 热心网友 <2285480916@qq.com>
Date: Sun, 19 Mar 2023 17:09:36 +0800
Subject: [PATCH] Commit the open-source code and improve the target system task specification
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 scrapy-master/.bandit.yml | 20 + scrapy-master/.bumpversion.cfg | 7 + scrapy-master/.coveragerc | 6 + scrapy-master/.flake8 | 22 + scrapy-master/.git-blame-ignore-revs | 7 + scrapy-master/.gitattributes | 1 + .../.github/ISSUE_TEMPLATE/bug_report.md | 41 + .../.github/ISSUE_TEMPLATE/feature_request.md | 33 + scrapy-master/.github/workflows/checks.yml | 42 + scrapy-master/.github/workflows/publish.yml | 21 + .../.github/workflows/tests-macos.yml | 26 + .../.github/workflows/tests-ubuntu.yml | 66 + .../.github/workflows/tests-windows.yml | 46 + scrapy-master/.gitignore | 28 + scrapy-master/.isort.cfg | 2 + scrapy-master/.pre-commit-config.yaml | 24 + scrapy-master/.readthedocs.yml | 17 + scrapy-master/AUTHORS | 58 + scrapy-master/CODE_OF_CONDUCT.md | 133 + scrapy-master/CONTRIBUTING.md | 6 + scrapy-master/INSTALL.md | 4 + scrapy-master/LICENSE | 27 + scrapy-master/MANIFEST.in | 26 + scrapy-master/NEWS | 1 + scrapy-master/README.rst | 113 + scrapy-master/artwork/README.rst | 20 + scrapy-master/artwork/qlassik.zip | Bin 0 -> 120204 bytes scrapy-master/artwork/scrapy-blog-logo.xcf | Bin 0 -> 52428 bytes scrapy-master/artwork/scrapy-logo.jpg | Bin 0 -> 23398 bytes scrapy-master/codecov.yml | 6 + scrapy-master/conftest.py | 82 + scrapy-master/docs/Makefile | 96 + scrapy-master/docs/README.rst | 68 + scrapy-master/docs/_ext/scrapydocs.py | 148 + scrapy-master/docs/_static/custom.css | 10 + .../docs/_static/selectors-sample1.html | 17 + scrapy-master/docs/_templates/layout.html | 11 + scrapy-master/docs/_tests/quotes.html | 281 + scrapy-master/docs/_tests/quotes1.html | 281 + scrapy-master/docs/conf.py | 321 + scrapy-master/docs/conftest.py | 34 + scrapy-master/docs/contributing.rst | 316 + scrapy-master/docs/faq.rst | 423 ++ scrapy-master/docs/index.rst | 282 + scrapy-master/docs/intro/examples.rst | 20 + scrapy-master/docs/intro/install.rst | 285 + scrapy-master/docs/intro/overview.rst | 157 + scrapy-master/docs/intro/tutorial.rst | 827 +++ scrapy-master/docs/news.rst | 5662 +++++++++++++++++ scrapy-master/docs/requirements.txt | 4 + .../docs/topics/_images/inspector_01.png | Bin 0 -> 53922 bytes .../docs/topics/_images/network_01.png | Bin 0 -> 10720 bytes .../docs/topics/_images/network_02.png | Bin 0 -> 82702 bytes .../docs/topics/_images/network_03.png | Bin 0 -> 45506 bytes .../topics/_images/scrapy_architecture.odg | Bin 0 -> 19653 bytes .../topics/_images/scrapy_architecture.png | Bin 0 -> 92558 bytes .../topics/_images/scrapy_architecture_02.png | Bin 0 -> 53978 bytes scrapy-master/docs/topics/api.rst | 278 + scrapy-master/docs/topics/architecture.rst | 176 + scrapy-master/docs/topics/asyncio.rst | 124 + scrapy-master/docs/topics/autothrottle.rst | 163 + scrapy-master/docs/topics/benchmarking.rst | 86 + scrapy-master/docs/topics/broad-crawls.rst | 238 + scrapy-master/docs/topics/commands.rst | 643 ++ scrapy-master/docs/topics/components.rst | 86 + scrapy-master/docs/topics/contracts.rst | 161 + scrapy-master/docs/topics/coroutines.rst | 229 + scrapy-master/docs/topics/debug.rst | 196 + 
scrapy-master/docs/topics/deploy.rst | 59 + scrapy-master/docs/topics/developer-tools.rst | 320 + scrapy-master/docs/topics/djangoitem.rst | 13 + .../docs/topics/downloader-middleware.rst | 1232 ++++ scrapy-master/docs/topics/dynamic-content.rst | 304 + scrapy-master/docs/topics/email.rst | 193 + scrapy-master/docs/topics/exceptions.rst | 117 + scrapy-master/docs/topics/exporters.rst | 445 ++ scrapy-master/docs/topics/extensions.rst | 385 ++ scrapy-master/docs/topics/feed-exports.rst | 784 +++ scrapy-master/docs/topics/item-pipeline.rst | 278 + scrapy-master/docs/topics/items.rst | 412 ++ scrapy-master/docs/topics/jobs.rst | 88 + scrapy-master/docs/topics/leaks.rst | 291 + scrapy-master/docs/topics/link-extractors.rst | 167 + scrapy-master/docs/topics/loaders.rst | 445 ++ scrapy-master/docs/topics/logging.rst | 351 + scrapy-master/docs/topics/media-pipeline.rst | 787 +++ scrapy-master/docs/topics/practices.rst | 304 + .../docs/topics/request-response.rst | 1334 ++++ scrapy-master/docs/topics/scheduler.rst | 34 + scrapy-master/docs/topics/scrapyd.rst | 13 + scrapy-master/docs/topics/selectors.rst | 1157 ++++ scrapy-master/docs/topics/settings.rst | 1785 ++++++ scrapy-master/docs/topics/shell.rst | 306 + scrapy-master/docs/topics/signals.rst | 487 ++ .../docs/topics/spider-middleware.rst | 471 ++ scrapy-master/docs/topics/spiders.rst | 901 +++ scrapy-master/docs/topics/stats.rst | 120 + scrapy-master/docs/topics/telnetconsole.rst | 207 + scrapy-master/docs/utils/linkfix.py | 68 + scrapy-master/docs/versioning.rst | 69 + scrapy-master/extras/coverage-report.sh | 7 + scrapy-master/extras/qps-bench-server.py | 61 + scrapy-master/extras/qpsclient.py | 55 + scrapy-master/extras/scrapy.1 | 74 + scrapy-master/extras/scrapy_bash_completion | 20 + scrapy-master/extras/scrapy_zsh_completion | 213 + scrapy-master/pylintrc | 99 + scrapy-master/pytest.ini | 28 + scrapy-master/scrapy/VERSION | 1 + scrapy-master/scrapy/__init__.py | 48 + scrapy-master/scrapy/__main__.py | 4 + scrapy-master/scrapy/cmdline.py | 186 + scrapy-master/scrapy/commands/__init__.py | 211 + scrapy-master/scrapy/commands/bench.py | 57 + scrapy-master/scrapy/commands/check.py | 108 + scrapy-master/scrapy/commands/crawl.py | 37 + scrapy-master/scrapy/commands/edit.py | 40 + scrapy-master/scrapy/commands/fetch.py | 83 + scrapy-master/scrapy/commands/genspider.py | 202 + scrapy-master/scrapy/commands/list.py | 13 + scrapy-master/scrapy/commands/parse.py | 349 + scrapy-master/scrapy/commands/runspider.py | 58 + scrapy-master/scrapy/commands/settings.py | 62 + scrapy-master/scrapy/commands/shell.py | 91 + scrapy-master/scrapy/commands/startproject.py | 138 + scrapy-master/scrapy/commands/version.py | 32 + scrapy-master/scrapy/commands/view.py | 21 + scrapy-master/scrapy/contracts/__init__.py | 181 + scrapy-master/scrapy/contracts/default.py | 110 + scrapy-master/scrapy/core/__init__.py | 3 + .../scrapy/core/downloader/__init__.py | 218 + .../scrapy/core/downloader/contextfactory.py | 174 + .../core/downloader/handlers/__init__.py | 85 + .../core/downloader/handlers/datauri.py | 21 + .../scrapy/core/downloader/handlers/file.py | 17 + .../scrapy/core/downloader/handlers/ftp.py | 125 + .../scrapy/core/downloader/handlers/http.py | 4 + .../scrapy/core/downloader/handlers/http10.py | 39 + .../scrapy/core/downloader/handlers/http11.py | 667 ++ .../scrapy/core/downloader/handlers/http2.py | 130 + .../scrapy/core/downloader/handlers/s3.py | 83 + .../scrapy/core/downloader/middleware.py | 96 + scrapy-master/scrapy/core/downloader/tls.py | 85 + 
.../scrapy/core/downloader/webclient.py | 226 + scrapy-master/scrapy/core/engine.py | 499 ++ scrapy-master/scrapy/core/http2/__init__.py | 0 scrapy-master/scrapy/core/http2/agent.py | 169 + scrapy-master/scrapy/core/http2/protocol.py | 438 ++ scrapy-master/scrapy/core/http2/stream.py | 492 ++ scrapy-master/scrapy/core/scheduler.py | 358 ++ scrapy-master/scrapy/core/scraper.py | 376 ++ scrapy-master/scrapy/core/spidermw.py | 341 + scrapy-master/scrapy/crawler.py | 396 ++ .../scrapy/downloadermiddlewares/__init__.py | 0 .../scrapy/downloadermiddlewares/ajaxcrawl.py | 95 + .../scrapy/downloadermiddlewares/cookies.py | 144 + .../downloadermiddlewares/decompression.py | 94 + .../downloadermiddlewares/defaultheaders.py | 21 + .../downloadermiddlewares/downloadtimeout.py | 25 + .../scrapy/downloadermiddlewares/httpauth.py | 52 + .../scrapy/downloadermiddlewares/httpcache.py | 149 + .../downloadermiddlewares/httpcompression.py | 107 + .../scrapy/downloadermiddlewares/httpproxy.py | 80 + .../scrapy/downloadermiddlewares/redirect.py | 137 + .../scrapy/downloadermiddlewares/retry.py | 177 + .../scrapy/downloadermiddlewares/robotstxt.py | 121 + .../scrapy/downloadermiddlewares/stats.py | 60 + .../scrapy/downloadermiddlewares/useragent.py | 23 + scrapy-master/scrapy/dupefilters.py | 130 + scrapy-master/scrapy/exceptions.py | 96 + scrapy-master/scrapy/exporters.py | 362 ++ scrapy-master/scrapy/extension.py | 15 + scrapy-master/scrapy/extensions/__init__.py | 0 .../scrapy/extensions/closespider.py | 71 + scrapy-master/scrapy/extensions/corestats.py | 47 + scrapy-master/scrapy/extensions/debug.py | 66 + scrapy-master/scrapy/extensions/feedexport.py | 604 ++ scrapy-master/scrapy/extensions/httpcache.py | 382 ++ scrapy-master/scrapy/extensions/logstats.py | 58 + scrapy-master/scrapy/extensions/memdebug.py | 36 + scrapy-master/scrapy/extensions/memusage.py | 141 + .../scrapy/extensions/postprocessing.py | 169 + .../scrapy/extensions/spiderstate.py | 40 + .../scrapy/extensions/statsmailer.py | 34 + scrapy-master/scrapy/extensions/telnet.py | 115 + scrapy-master/scrapy/extensions/throttle.py | 101 + scrapy-master/scrapy/http/__init__.py | 16 + scrapy-master/scrapy/http/common.py | 7 + scrapy-master/scrapy/http/cookies.py | 194 + scrapy-master/scrapy/http/headers.py | 102 + scrapy-master/scrapy/http/request/__init__.py | 248 + scrapy-master/scrapy/http/request/form.py | 263 + .../scrapy/http/request/json_request.py | 63 + scrapy-master/scrapy/http/request/rpc.py | 33 + .../scrapy/http/response/__init__.py | 249 + scrapy-master/scrapy/http/response/html.py | 12 + scrapy-master/scrapy/http/response/text.py | 295 + scrapy-master/scrapy/http/response/xml.py | 12 + scrapy-master/scrapy/interfaces.py | 17 + scrapy-master/scrapy/item.py | 119 + scrapy-master/scrapy/link.py | 55 + .../scrapy/linkextractors/__init__.py | 103 + .../scrapy/linkextractors/lxmlhtml.py | 244 + scrapy-master/scrapy/loader/__init__.py | 91 + scrapy-master/scrapy/loader/common.py | 21 + scrapy-master/scrapy/loader/processors.py | 20 + scrapy-master/scrapy/logformatter.py | 147 + scrapy-master/scrapy/mail.py | 208 + scrapy-master/scrapy/middleware.py | 88 + scrapy-master/scrapy/mime.types | 750 +++ scrapy-master/scrapy/pipelines/__init__.py | 27 + scrapy-master/scrapy/pipelines/files.py | 551 ++ scrapy-master/scrapy/pipelines/images.py | 231 + scrapy-master/scrapy/pipelines/media.py | 275 + scrapy-master/scrapy/pqueues.py | 233 + scrapy-master/scrapy/resolver.py | 133 + scrapy-master/scrapy/responsetypes.py | 122 + 
scrapy-master/scrapy/robotstxt.py | 135 + scrapy-master/scrapy/selector/__init__.py | 6 + scrapy-master/scrapy/selector/unified.py | 83 + scrapy-master/scrapy/settings/__init__.py | 486 ++ .../scrapy/settings/default_settings.py | 315 + scrapy-master/scrapy/shell.py | 208 + scrapy-master/scrapy/signalmanager.py | 68 + scrapy-master/scrapy/signals.py | 33 + scrapy-master/scrapy/spiderloader.py | 93 + .../scrapy/spidermiddlewares/__init__.py | 0 .../scrapy/spidermiddlewares/depth.py | 63 + .../scrapy/spidermiddlewares/httperror.py | 59 + .../scrapy/spidermiddlewares/offsite.py | 96 + .../scrapy/spidermiddlewares/referer.py | 385 ++ .../scrapy/spidermiddlewares/urllength.py | 45 + scrapy-master/scrapy/spiders/__init__.py | 101 + scrapy-master/scrapy/spiders/crawl.py | 149 + scrapy-master/scrapy/spiders/feed.py | 145 + scrapy-master/scrapy/spiders/init.py | 31 + scrapy-master/scrapy/spiders/sitemap.py | 100 + scrapy-master/scrapy/squeues.py | 174 + scrapy-master/scrapy/statscollectors.py | 81 + .../templates/project/module/__init__.py | 0 .../templates/project/module/items.py.tmpl | 12 + .../project/module/middlewares.py.tmpl | 103 + .../project/module/pipelines.py.tmpl | 13 + .../templates/project/module/settings.py.tmpl | 93 + .../project/module/spiders/__init__.py | 4 + .../scrapy/templates/project/scrapy.cfg | 11 + .../scrapy/templates/spiders/basic.tmpl | 10 + .../scrapy/templates/spiders/crawl.tmpl | 18 + .../scrapy/templates/spiders/csvfeed.tmpl | 20 + .../scrapy/templates/spiders/xmlfeed.tmpl | 16 + scrapy-master/scrapy/utils/__init__.py | 0 scrapy-master/scrapy/utils/asyncgen.py | 18 + scrapy-master/scrapy/utils/benchserver.py | 46 + scrapy-master/scrapy/utils/boto.py | 10 + scrapy-master/scrapy/utils/conf.py | 219 + scrapy-master/scrapy/utils/console.py | 110 + scrapy-master/scrapy/utils/curl.py | 108 + scrapy-master/scrapy/utils/datatypes.py | 120 + scrapy-master/scrapy/utils/decorators.py | 50 + scrapy-master/scrapy/utils/defer.py | 376 ++ scrapy-master/scrapy/utils/deprecate.py | 179 + scrapy-master/scrapy/utils/display.py | 50 + scrapy-master/scrapy/utils/engine.py | 47 + scrapy-master/scrapy/utils/ftp.py | 36 + scrapy-master/scrapy/utils/gz.py | 34 + scrapy-master/scrapy/utils/httpobj.py | 20 + scrapy-master/scrapy/utils/iterators.py | 165 + scrapy-master/scrapy/utils/job.py | 11 + scrapy-master/scrapy/utils/log.py | 231 + scrapy-master/scrapy/utils/misc.py | 276 + scrapy-master/scrapy/utils/ossignal.py | 25 + scrapy-master/scrapy/utils/project.py | 88 + scrapy-master/scrapy/utils/python.py | 336 + scrapy-master/scrapy/utils/reactor.py | 185 + scrapy-master/scrapy/utils/reqser.py | 27 + scrapy-master/scrapy/utils/request.py | 329 + scrapy-master/scrapy/utils/response.py | 105 + scrapy-master/scrapy/utils/serialize.py | 38 + scrapy-master/scrapy/utils/signal.py | 99 + scrapy-master/scrapy/utils/sitemap.py | 49 + scrapy-master/scrapy/utils/spider.py | 71 + scrapy-master/scrapy/utils/ssl.py | 57 + scrapy-master/scrapy/utils/template.py | 37 + scrapy-master/scrapy/utils/test.py | 130 + scrapy-master/scrapy/utils/testproc.py | 49 + scrapy-master/scrapy/utils/testsite.py | 55 + scrapy-master/scrapy/utils/trackref.py | 65 + scrapy-master/scrapy/utils/url.py | 179 + scrapy-master/scrapy/utils/versions.py | 31 + scrapy-master/sep/README.rst | 7 + scrapy-master/sep/sep-001.rst | 275 + scrapy-master/sep/sep-002.rst | 116 + scrapy-master/sep/sep-003.rst | 172 + scrapy-master/sep/sep-004.rst | 88 + scrapy-master/sep/sep-005.rst | 145 + scrapy-master/sep/sep-006.rst | 76 + 
scrapy-master/sep/sep-007.rst | 137 + scrapy-master/sep/sep-008.rst | 112 + scrapy-master/sep/sep-009.rst | 140 + scrapy-master/sep/sep-010.rst | 70 + scrapy-master/sep/sep-011.rst | 36 + scrapy-master/sep/sep-012.rst | 92 + scrapy-master/sep/sep-013.rst | 188 + scrapy-master/sep/sep-014.rst | 664 ++ scrapy-master/sep/sep-015.rst | 57 + scrapy-master/sep/sep-016.rst | 300 + scrapy-master/sep/sep-017.rst | 109 + scrapy-master/sep/sep-018.rst | 660 ++ scrapy-master/sep/sep-019.rst | 329 + scrapy-master/sep/sep-020.rst | 207 + scrapy-master/sep/sep-021.rst | 113 + scrapy-master/setup.cfg | 92 + scrapy-master/setup.py | 97 + .../CrawlerProcess/asyncio_custom_loop.py | 19 + .../CrawlerProcess/asyncio_deferred_signal.py | 47 + .../asyncio_enabled_no_reactor.py | 18 + .../CrawlerProcess/asyncio_enabled_reactor.py | 27 + .../asyncio_enabled_reactor_different_loop.py | 28 + .../asyncio_enabled_reactor_same_loop.py | 30 + .../caching_hostname_resolver.py | 35 + .../caching_hostname_resolver_ipv6.py | 22 + .../CrawlerProcess/default_name_resolver.py | 18 + scrapy-master/tests/CrawlerProcess/multi.py | 16 + .../tests/CrawlerProcess/reactor_default.py | 17 + .../reactor_default_twisted_reactor_select.py | 21 + .../tests/CrawlerProcess/reactor_select.py | 19 + ..._select_subclass_twisted_reactor_select.py | 30 + .../reactor_select_twisted_reactor_select.py | 23 + scrapy-master/tests/CrawlerProcess/simple.py | 15 + .../CrawlerProcess/twisted_reactor_asyncio.py | 15 + .../twisted_reactor_custom_settings.py | 14 + ...wisted_reactor_custom_settings_conflict.py | 22 + .../twisted_reactor_custom_settings_same.py | 22 + .../CrawlerProcess/twisted_reactor_poll.py | 15 + .../CrawlerProcess/twisted_reactor_select.py | 15 + .../tests/CrawlerRunner/ip_address.py | 52 + scrapy-master/tests/__init__.py | 39 + scrapy-master/tests/ftpserver.py | 24 + scrapy-master/tests/ignores.txt | 3 + scrapy-master/tests/keys/__init__.py | 61 + scrapy-master/tests/keys/example-com.cert.pem | 26 + scrapy-master/tests/keys/example-com.conf | 84 + .../tests/keys/example-com.gen.README | 24 + scrapy-master/tests/keys/example-com.key.pem | 28 + .../tests/keys/localhost-ip.gen.README | 21 + scrapy-master/tests/keys/localhost.gen.README | 21 + scrapy-master/tests/keys/localhost.ip.crt | 20 + scrapy-master/tests/keys/localhost.ip.key | 28 + scrapy-master/tests/keys/mitmproxy-ca.pem | 50 + scrapy-master/tests/mocks/__init__.py | 0 scrapy-master/tests/mocks/dummydbm.py | 24 + scrapy-master/tests/mockserver.py | 400 ++ scrapy-master/tests/pipelines.py | 16 + scrapy-master/tests/requirements.txt | 16 + .../sample_data/compressed/feed-sample1.tar | Bin 0 -> 20480 bytes .../sample_data/compressed/feed-sample1.xml | Bin 0 -> 9950 bytes .../compressed/feed-sample1.xml.bz2 | Bin 0 -> 1430 bytes .../compressed/feed-sample1.xml.gz | Bin 0 -> 1131 bytes .../sample_data/compressed/feed-sample1.zip | Bin 0 -> 1260 bytes .../tests/sample_data/compressed/html-br.bin | Bin 0 -> 4027 bytes .../sample_data/compressed/html-gzip.bin | Bin 0 -> 8037 bytes .../compressed/html-rawdeflate.bin | Bin 0 -> 8021 bytes .../compressed/html-zlibdeflate.bin | Bin 0 -> 8027 bytes .../html-zstd-static-content-size.bin | Bin 0 -> 8066 bytes .../html-zstd-static-no-content-size.bin | Bin 0 -> 8063 bytes .../html-zstd-streaming-no-content-size.bin | Bin 0 -> 8047 bytes .../compressed/truncated-crc-error-short.gz | Bin 0 -> 1930 bytes .../compressed/truncated-crc-error.gz | Bin 0 -> 5766 bytes .../compressed/unexpected-eof-output.txt | Bin 0 -> 16782 bytes 
.../sample_data/compressed/unexpected-eof.gz | Bin 0 -> 5134 bytes .../tests/sample_data/feeds/feed-sample1.xml | Bin 0 -> 9950 bytes .../tests/sample_data/feeds/feed-sample2.xml | Bin 0 -> 6388 bytes .../tests/sample_data/feeds/feed-sample3.csv | Bin 0 -> 81 bytes .../tests/sample_data/feeds/feed-sample4.csv | Bin 0 -> 45 bytes .../tests/sample_data/feeds/feed-sample5.csv | Bin 0 -> 47 bytes .../tests/sample_data/feeds/feed-sample6.csv | Bin 0 -> 101 bytes .../link_extractor/linkextractor.html | Bin 0 -> 830 bytes .../link_extractor/linkextractor_latin1.html | Bin 0 -> 585 bytes .../link_extractor/linkextractor_no_href.html | Bin 0 -> 740 bytes .../link_extractor/linkextractor_noenc.html | Bin 0 -> 390 bytes .../python-logo-master-v3-TM-flattened.png | Bin 0 -> 11155 bytes .../files/images/python-powered-h-50x65.png | Bin 0 -> 3243 bytes .../test_site/files/images/scrapy.png | Bin 0 -> 2710 bytes .../tests/sample_data/test_site/index.html | Bin 0 -> 311 bytes .../tests/sample_data/test_site/item1.html | Bin 0 -> 225 bytes .../tests/sample_data/test_site/item2.html | Bin 0 -> 209 bytes scrapy-master/tests/spiders.py | 492 ++ scrapy-master/tests/test_closespider.py | 56 + scrapy-master/tests/test_cmdline/__init__.py | 73 + .../tests/test_cmdline/extensions.py | 14 + scrapy-master/tests/test_cmdline/settings.py | 14 + .../__init__.py | 19 + .../scrapy.cfg | 2 + .../test_spider/__init__.py | 0 .../test_spider/pipelines.py | 14 + .../test_spider/settings.py | 2 + .../test_spider/spiders/__init__.py | 0 .../test_spider/spiders/exception.py | 12 + .../test_spider/spiders/normal.py | 12 + scrapy-master/tests/test_command_check.py | 96 + scrapy-master/tests/test_command_fetch.py | 35 + scrapy-master/tests/test_command_parse.py | 435 ++ scrapy-master/tests/test_command_shell.py | 135 + scrapy-master/tests/test_command_version.py | 45 + scrapy-master/tests/test_commands.py | 1098 ++++ scrapy-master/tests/test_contracts.py | 436 ++ scrapy-master/tests/test_core_downloader.py | 11 + scrapy-master/tests/test_crawl.py | 702 ++ scrapy-master/tests/test_crawler.py | 562 ++ scrapy-master/tests/test_dependencies.py | 39 + .../tests/test_downloader_handlers.py | 1294 ++++ .../tests/test_downloader_handlers_http2.py | 254 + .../tests/test_downloadermiddleware.py | 267 + ...test_downloadermiddleware_ajaxcrawlable.py | 63 + .../test_downloadermiddleware_cookies.py | 734 +++ ...test_downloadermiddleware_decompression.py | 53 + ...est_downloadermiddleware_defaultheaders.py | 35 + ...st_downloadermiddleware_downloadtimeout.py | 41 + .../test_downloadermiddleware_httpauth.py | 115 + .../test_downloadermiddleware_httpcache.py | 572 ++ ...st_downloadermiddleware_httpcompression.py | 400 ++ .../test_downloadermiddleware_httpproxy.py | 475 ++ .../test_downloadermiddleware_redirect.py | 387 ++ .../tests/test_downloadermiddleware_retry.py | 635 ++ .../test_downloadermiddleware_robotstxt.py | 269 + .../tests/test_downloadermiddleware_stats.py | 71 + .../test_downloadermiddleware_useragent.py | 54 + .../tests/test_downloaderslotssettings.py | 72 + scrapy-master/tests/test_dupefilters.py | 278 + scrapy-master/tests/test_engine.py | 558 ++ .../tests/test_engine_stop_download_bytes.py | 74 + .../test_engine_stop_download_headers.py | 72 + scrapy-master/tests/test_exporters.py | 664 ++ scrapy-master/tests/test_extension_telnet.py | 52 + scrapy-master/tests/test_feedexport.py | 2949 +++++++++ .../tests/test_http2_client_protocol.py | 692 ++ scrapy-master/tests/test_http_cookies.py | 67 + scrapy-master/tests/test_http_headers.py 
| 166 + scrapy-master/tests/test_http_request.py | 1650 +++++ scrapy-master/tests/test_http_response.py | 1023 +++ scrapy-master/tests/test_item.py | 302 + scrapy-master/tests/test_link.py | 57 + scrapy-master/tests/test_linkextractors.py | 817 +++ scrapy-master/tests/test_loader.py | 592 ++ scrapy-master/tests/test_loader_deprecated.py | 745 +++ scrapy-master/tests/test_logformatter.py | 241 + scrapy-master/tests/test_mail.py | 171 + scrapy-master/tests/test_middleware.py | 89 + scrapy-master/tests/test_pipeline_crawl.py | 215 + scrapy-master/tests/test_pipeline_files.py | 688 ++ scrapy-master/tests/test_pipeline_images.py | 646 ++ scrapy-master/tests/test_pipeline_media.py | 567 ++ scrapy-master/tests/test_pipelines.py | 131 + scrapy-master/tests/test_pqueues.py | 157 + scrapy-master/tests/test_proxy_connect.py | 121 + .../tests/test_request_attribute_binding.py | 216 + scrapy-master/tests/test_request_cb_kwargs.py | 194 + scrapy-master/tests/test_request_dict.py | 232 + scrapy-master/tests/test_request_left.py | 58 + scrapy-master/tests/test_responsetypes.py | 120 + .../tests/test_robotstxt_interface.py | 202 + scrapy-master/tests/test_scheduler.py | 349 + scrapy-master/tests/test_scheduler_base.py | 164 + scrapy-master/tests/test_selector.py | 110 + scrapy-master/tests/test_settings/__init__.py | 456 ++ .../tests/test_settings/default_settings.py | 3 + scrapy-master/tests/test_signals.py | 44 + scrapy-master/tests/test_spider.py | 687 ++ .../tests/test_spiderloader/__init__.py | 204 + .../test_spiders/__init__.py | 0 .../test_spiders/nested/__init__.py | 0 .../test_spiders/nested/spider4.py | 10 + .../test_spiderloader/test_spiders/spider0.py | 5 + .../test_spiderloader/test_spiders/spider1.py | 6 + .../test_spiderloader/test_spiders/spider2.py | 6 + .../test_spiderloader/test_spiders/spider3.py | 10 + scrapy-master/tests/test_spidermiddleware.py | 549 ++ .../tests/test_spidermiddleware_depth.py | 41 + .../tests/test_spidermiddleware_httperror.py | 234 + .../tests/test_spidermiddleware_offsite.py | 102 + .../test_spidermiddleware_output_chain.py | 504 ++ .../tests/test_spidermiddleware_referer.py | 1294 ++++ .../tests/test_spidermiddleware_urllength.py | 44 + scrapy-master/tests/test_spiderstate.py | 46 + scrapy-master/tests/test_squeues.py | 187 + scrapy-master/tests/test_squeues_request.py | 226 + scrapy-master/tests/test_stats.py | 96 + scrapy-master/tests/test_toplevel.py | 33 + .../tests/test_urlparse_monkeypatches.py | 11 + scrapy-master/tests/test_utils_asyncgen.py | 20 + scrapy-master/tests/test_utils_asyncio.py | 34 + scrapy-master/tests/test_utils_conf.py | 249 + scrapy-master/tests/test_utils_console.py | 44 + scrapy-master/tests/test_utils_curl.py | 226 + scrapy-master/tests/test_utils_datatypes.py | 324 + scrapy-master/tests/test_utils_defer.py | 233 + scrapy-master/tests/test_utils_deprecate.py | 298 + scrapy-master/tests/test_utils_display.py | 90 + scrapy-master/tests/test_utils_gz.py | 57 + scrapy-master/tests/test_utils_httpobj.py | 26 + scrapy-master/tests/test_utils_iterators.py | 529 ++ scrapy-master/tests/test_utils_log.py | 108 + .../tests/test_utils_misc/__init__.py | 179 + scrapy-master/tests/test_utils_misc/test.egg | Bin 0 -> 2231 bytes ...t_return_with_argument_inside_generator.py | 272 + .../test_walk_modules/__init__.py | 0 .../test_walk_modules/mod/__init__.py | 0 .../test_walk_modules/mod/mod0.py | 0 .../test_utils_misc/test_walk_modules/mod1.py | 0 scrapy-master/tests/test_utils_project.py | 86 + scrapy-master/tests/test_utils_python.py | 259 + 
scrapy-master/tests/test_utils_request.py | 670 ++ scrapy-master/tests/test_utils_response.py | 200 + scrapy-master/tests/test_utils_serialize.py | 85 + scrapy-master/tests/test_utils_signal.py | 105 + scrapy-master/tests/test_utils_sitemap.py | 301 + scrapy-master/tests/test_utils_spider.py | 36 + scrapy-master/tests/test_utils_template.py | 39 + scrapy-master/tests/test_utils_trackref.py | 89 + scrapy-master/tests/test_utils_url.py | 613 ++ scrapy-master/tests/test_webclient.py | 491 ++ scrapy-master/tests/upper-constraints.txt | 17 + scrapy-master/tox.ini | 189 + ...37\344\273\273\345\212\241\344\271\246.md" | 41 + 524 files changed, 91903 insertions(+) create mode 100644 scrapy-master/.bandit.yml create mode 100644 scrapy-master/.bumpversion.cfg create mode 100644 scrapy-master/.coveragerc create mode 100644 scrapy-master/.flake8 create mode 100644 scrapy-master/.git-blame-ignore-revs create mode 100644 scrapy-master/.gitattributes create mode 100644 scrapy-master/.github/ISSUE_TEMPLATE/bug_report.md create mode 100644 scrapy-master/.github/ISSUE_TEMPLATE/feature_request.md create mode 100644 scrapy-master/.github/workflows/checks.yml create mode 100644 scrapy-master/.github/workflows/publish.yml create mode 100644 scrapy-master/.github/workflows/tests-macos.yml create mode 100644 scrapy-master/.github/workflows/tests-ubuntu.yml create mode 100644 scrapy-master/.github/workflows/tests-windows.yml create mode 100644 scrapy-master/.gitignore create mode 100644 scrapy-master/.isort.cfg create mode 100644 scrapy-master/.pre-commit-config.yaml create mode 100644 scrapy-master/.readthedocs.yml create mode 100644 scrapy-master/AUTHORS create mode 100644 scrapy-master/CODE_OF_CONDUCT.md create mode 100644 scrapy-master/CONTRIBUTING.md create mode 100644 scrapy-master/INSTALL.md create mode 100644 scrapy-master/LICENSE create mode 100644 scrapy-master/MANIFEST.in create mode 100644 scrapy-master/NEWS create mode 100644 scrapy-master/README.rst create mode 100644 scrapy-master/artwork/README.rst create mode 100644 scrapy-master/artwork/qlassik.zip create mode 100644 scrapy-master/artwork/scrapy-blog-logo.xcf create mode 100644 scrapy-master/artwork/scrapy-logo.jpg create mode 100644 scrapy-master/codecov.yml create mode 100644 scrapy-master/conftest.py create mode 100644 scrapy-master/docs/Makefile create mode 100644 scrapy-master/docs/README.rst create mode 100644 scrapy-master/docs/_ext/scrapydocs.py create mode 100644 scrapy-master/docs/_static/custom.css create mode 100644 scrapy-master/docs/_static/selectors-sample1.html create mode 100644 scrapy-master/docs/_templates/layout.html create mode 100644 scrapy-master/docs/_tests/quotes.html create mode 100644 scrapy-master/docs/_tests/quotes1.html create mode 100644 scrapy-master/docs/conf.py create mode 100644 scrapy-master/docs/conftest.py create mode 100644 scrapy-master/docs/contributing.rst create mode 100644 scrapy-master/docs/faq.rst create mode 100644 scrapy-master/docs/index.rst create mode 100644 scrapy-master/docs/intro/examples.rst create mode 100644 scrapy-master/docs/intro/install.rst create mode 100644 scrapy-master/docs/intro/overview.rst create mode 100644 scrapy-master/docs/intro/tutorial.rst create mode 100644 scrapy-master/docs/news.rst create mode 100644 scrapy-master/docs/requirements.txt create mode 100644 scrapy-master/docs/topics/_images/inspector_01.png create mode 100644 scrapy-master/docs/topics/_images/network_01.png create mode 100644 scrapy-master/docs/topics/_images/network_02.png create mode 100644 
scrapy-master/docs/topics/_images/network_03.png create mode 100644 scrapy-master/docs/topics/_images/scrapy_architecture.odg create mode 100644 scrapy-master/docs/topics/_images/scrapy_architecture.png create mode 100644 scrapy-master/docs/topics/_images/scrapy_architecture_02.png create mode 100644 scrapy-master/docs/topics/api.rst create mode 100644 scrapy-master/docs/topics/architecture.rst create mode 100644 scrapy-master/docs/topics/asyncio.rst create mode 100644 scrapy-master/docs/topics/autothrottle.rst create mode 100644 scrapy-master/docs/topics/benchmarking.rst create mode 100644 scrapy-master/docs/topics/broad-crawls.rst create mode 100644 scrapy-master/docs/topics/commands.rst create mode 100644 scrapy-master/docs/topics/components.rst create mode 100644 scrapy-master/docs/topics/contracts.rst create mode 100644 scrapy-master/docs/topics/coroutines.rst create mode 100644 scrapy-master/docs/topics/debug.rst create mode 100644 scrapy-master/docs/topics/deploy.rst create mode 100644 scrapy-master/docs/topics/developer-tools.rst create mode 100644 scrapy-master/docs/topics/djangoitem.rst create mode 100644 scrapy-master/docs/topics/downloader-middleware.rst create mode 100644 scrapy-master/docs/topics/dynamic-content.rst create mode 100644 scrapy-master/docs/topics/email.rst create mode 100644 scrapy-master/docs/topics/exceptions.rst create mode 100644 scrapy-master/docs/topics/exporters.rst create mode 100644 scrapy-master/docs/topics/extensions.rst create mode 100644 scrapy-master/docs/topics/feed-exports.rst create mode 100644 scrapy-master/docs/topics/item-pipeline.rst create mode 100644 scrapy-master/docs/topics/items.rst create mode 100644 scrapy-master/docs/topics/jobs.rst create mode 100644 scrapy-master/docs/topics/leaks.rst create mode 100644 scrapy-master/docs/topics/link-extractors.rst create mode 100644 scrapy-master/docs/topics/loaders.rst create mode 100644 scrapy-master/docs/topics/logging.rst create mode 100644 scrapy-master/docs/topics/media-pipeline.rst create mode 100644 scrapy-master/docs/topics/practices.rst create mode 100644 scrapy-master/docs/topics/request-response.rst create mode 100644 scrapy-master/docs/topics/scheduler.rst create mode 100644 scrapy-master/docs/topics/scrapyd.rst create mode 100644 scrapy-master/docs/topics/selectors.rst create mode 100644 scrapy-master/docs/topics/settings.rst create mode 100644 scrapy-master/docs/topics/shell.rst create mode 100644 scrapy-master/docs/topics/signals.rst create mode 100644 scrapy-master/docs/topics/spider-middleware.rst create mode 100644 scrapy-master/docs/topics/spiders.rst create mode 100644 scrapy-master/docs/topics/stats.rst create mode 100644 scrapy-master/docs/topics/telnetconsole.rst create mode 100644 scrapy-master/docs/utils/linkfix.py create mode 100644 scrapy-master/docs/versioning.rst create mode 100644 scrapy-master/extras/coverage-report.sh create mode 100644 scrapy-master/extras/qps-bench-server.py create mode 100644 scrapy-master/extras/qpsclient.py create mode 100644 scrapy-master/extras/scrapy.1 create mode 100644 scrapy-master/extras/scrapy_bash_completion create mode 100644 scrapy-master/extras/scrapy_zsh_completion create mode 100644 scrapy-master/pylintrc create mode 100644 scrapy-master/pytest.ini create mode 100644 scrapy-master/scrapy/VERSION create mode 100644 scrapy-master/scrapy/__init__.py create mode 100644 scrapy-master/scrapy/__main__.py create mode 100644 scrapy-master/scrapy/cmdline.py create mode 100644 scrapy-master/scrapy/commands/__init__.py create mode 100644 
scrapy-master/scrapy/commands/bench.py create mode 100644 scrapy-master/scrapy/commands/check.py create mode 100644 scrapy-master/scrapy/commands/crawl.py create mode 100644 scrapy-master/scrapy/commands/edit.py create mode 100644 scrapy-master/scrapy/commands/fetch.py create mode 100644 scrapy-master/scrapy/commands/genspider.py create mode 100644 scrapy-master/scrapy/commands/list.py create mode 100644 scrapy-master/scrapy/commands/parse.py create mode 100644 scrapy-master/scrapy/commands/runspider.py create mode 100644 scrapy-master/scrapy/commands/settings.py create mode 100644 scrapy-master/scrapy/commands/shell.py create mode 100644 scrapy-master/scrapy/commands/startproject.py create mode 100644 scrapy-master/scrapy/commands/version.py create mode 100644 scrapy-master/scrapy/commands/view.py create mode 100644 scrapy-master/scrapy/contracts/__init__.py create mode 100644 scrapy-master/scrapy/contracts/default.py create mode 100644 scrapy-master/scrapy/core/__init__.py create mode 100644 scrapy-master/scrapy/core/downloader/__init__.py create mode 100644 scrapy-master/scrapy/core/downloader/contextfactory.py create mode 100644 scrapy-master/scrapy/core/downloader/handlers/__init__.py create mode 100644 scrapy-master/scrapy/core/downloader/handlers/datauri.py create mode 100644 scrapy-master/scrapy/core/downloader/handlers/file.py create mode 100644 scrapy-master/scrapy/core/downloader/handlers/ftp.py create mode 100644 scrapy-master/scrapy/core/downloader/handlers/http.py create mode 100644 scrapy-master/scrapy/core/downloader/handlers/http10.py create mode 100644 scrapy-master/scrapy/core/downloader/handlers/http11.py create mode 100644 scrapy-master/scrapy/core/downloader/handlers/http2.py create mode 100644 scrapy-master/scrapy/core/downloader/handlers/s3.py create mode 100644 scrapy-master/scrapy/core/downloader/middleware.py create mode 100644 scrapy-master/scrapy/core/downloader/tls.py create mode 100644 scrapy-master/scrapy/core/downloader/webclient.py create mode 100644 scrapy-master/scrapy/core/engine.py create mode 100644 scrapy-master/scrapy/core/http2/__init__.py create mode 100644 scrapy-master/scrapy/core/http2/agent.py create mode 100644 scrapy-master/scrapy/core/http2/protocol.py create mode 100644 scrapy-master/scrapy/core/http2/stream.py create mode 100644 scrapy-master/scrapy/core/scheduler.py create mode 100644 scrapy-master/scrapy/core/scraper.py create mode 100644 scrapy-master/scrapy/core/spidermw.py create mode 100644 scrapy-master/scrapy/crawler.py create mode 100644 scrapy-master/scrapy/downloadermiddlewares/__init__.py create mode 100644 scrapy-master/scrapy/downloadermiddlewares/ajaxcrawl.py create mode 100644 scrapy-master/scrapy/downloadermiddlewares/cookies.py create mode 100644 scrapy-master/scrapy/downloadermiddlewares/decompression.py create mode 100644 scrapy-master/scrapy/downloadermiddlewares/defaultheaders.py create mode 100644 scrapy-master/scrapy/downloadermiddlewares/downloadtimeout.py create mode 100644 scrapy-master/scrapy/downloadermiddlewares/httpauth.py create mode 100644 scrapy-master/scrapy/downloadermiddlewares/httpcache.py create mode 100644 scrapy-master/scrapy/downloadermiddlewares/httpcompression.py create mode 100644 scrapy-master/scrapy/downloadermiddlewares/httpproxy.py create mode 100644 scrapy-master/scrapy/downloadermiddlewares/redirect.py create mode 100644 scrapy-master/scrapy/downloadermiddlewares/retry.py create mode 100644 scrapy-master/scrapy/downloadermiddlewares/robotstxt.py create mode 100644 
scrapy-master/scrapy/downloadermiddlewares/stats.py create mode 100644 scrapy-master/scrapy/downloadermiddlewares/useragent.py create mode 100644 scrapy-master/scrapy/dupefilters.py create mode 100644 scrapy-master/scrapy/exceptions.py create mode 100644 scrapy-master/scrapy/exporters.py create mode 100644 scrapy-master/scrapy/extension.py create mode 100644 scrapy-master/scrapy/extensions/__init__.py create mode 100644 scrapy-master/scrapy/extensions/closespider.py create mode 100644 scrapy-master/scrapy/extensions/corestats.py create mode 100644 scrapy-master/scrapy/extensions/debug.py create mode 100644 scrapy-master/scrapy/extensions/feedexport.py create mode 100644 scrapy-master/scrapy/extensions/httpcache.py create mode 100644 scrapy-master/scrapy/extensions/logstats.py create mode 100644 scrapy-master/scrapy/extensions/memdebug.py create mode 100644 scrapy-master/scrapy/extensions/memusage.py create mode 100644 scrapy-master/scrapy/extensions/postprocessing.py create mode 100644 scrapy-master/scrapy/extensions/spiderstate.py create mode 100644 scrapy-master/scrapy/extensions/statsmailer.py create mode 100644 scrapy-master/scrapy/extensions/telnet.py create mode 100644 scrapy-master/scrapy/extensions/throttle.py create mode 100644 scrapy-master/scrapy/http/__init__.py create mode 100644 scrapy-master/scrapy/http/common.py create mode 100644 scrapy-master/scrapy/http/cookies.py create mode 100644 scrapy-master/scrapy/http/headers.py create mode 100644 scrapy-master/scrapy/http/request/__init__.py create mode 100644 scrapy-master/scrapy/http/request/form.py create mode 100644 scrapy-master/scrapy/http/request/json_request.py create mode 100644 scrapy-master/scrapy/http/request/rpc.py create mode 100644 scrapy-master/scrapy/http/response/__init__.py create mode 100644 scrapy-master/scrapy/http/response/html.py create mode 100644 scrapy-master/scrapy/http/response/text.py create mode 100644 scrapy-master/scrapy/http/response/xml.py create mode 100644 scrapy-master/scrapy/interfaces.py create mode 100644 scrapy-master/scrapy/item.py create mode 100644 scrapy-master/scrapy/link.py create mode 100644 scrapy-master/scrapy/linkextractors/__init__.py create mode 100644 scrapy-master/scrapy/linkextractors/lxmlhtml.py create mode 100644 scrapy-master/scrapy/loader/__init__.py create mode 100644 scrapy-master/scrapy/loader/common.py create mode 100644 scrapy-master/scrapy/loader/processors.py create mode 100644 scrapy-master/scrapy/logformatter.py create mode 100644 scrapy-master/scrapy/mail.py create mode 100644 scrapy-master/scrapy/middleware.py create mode 100644 scrapy-master/scrapy/mime.types create mode 100644 scrapy-master/scrapy/pipelines/__init__.py create mode 100644 scrapy-master/scrapy/pipelines/files.py create mode 100644 scrapy-master/scrapy/pipelines/images.py create mode 100644 scrapy-master/scrapy/pipelines/media.py create mode 100644 scrapy-master/scrapy/pqueues.py create mode 100644 scrapy-master/scrapy/resolver.py create mode 100644 scrapy-master/scrapy/responsetypes.py create mode 100644 scrapy-master/scrapy/robotstxt.py create mode 100644 scrapy-master/scrapy/selector/__init__.py create mode 100644 scrapy-master/scrapy/selector/unified.py create mode 100644 scrapy-master/scrapy/settings/__init__.py create mode 100644 scrapy-master/scrapy/settings/default_settings.py create mode 100644 scrapy-master/scrapy/shell.py create mode 100644 scrapy-master/scrapy/signalmanager.py create mode 100644 scrapy-master/scrapy/signals.py create mode 100644 scrapy-master/scrapy/spiderloader.py 
create mode 100644 scrapy-master/scrapy/spidermiddlewares/__init__.py create mode 100644 scrapy-master/scrapy/spidermiddlewares/depth.py create mode 100644 scrapy-master/scrapy/spidermiddlewares/httperror.py create mode 100644 scrapy-master/scrapy/spidermiddlewares/offsite.py create mode 100644 scrapy-master/scrapy/spidermiddlewares/referer.py create mode 100644 scrapy-master/scrapy/spidermiddlewares/urllength.py create mode 100644 scrapy-master/scrapy/spiders/__init__.py create mode 100644 scrapy-master/scrapy/spiders/crawl.py create mode 100644 scrapy-master/scrapy/spiders/feed.py create mode 100644 scrapy-master/scrapy/spiders/init.py create mode 100644 scrapy-master/scrapy/spiders/sitemap.py create mode 100644 scrapy-master/scrapy/squeues.py create mode 100644 scrapy-master/scrapy/statscollectors.py create mode 100644 scrapy-master/scrapy/templates/project/module/__init__.py create mode 100644 scrapy-master/scrapy/templates/project/module/items.py.tmpl create mode 100644 scrapy-master/scrapy/templates/project/module/middlewares.py.tmpl create mode 100644 scrapy-master/scrapy/templates/project/module/pipelines.py.tmpl create mode 100644 scrapy-master/scrapy/templates/project/module/settings.py.tmpl create mode 100644 scrapy-master/scrapy/templates/project/module/spiders/__init__.py create mode 100644 scrapy-master/scrapy/templates/project/scrapy.cfg create mode 100644 scrapy-master/scrapy/templates/spiders/basic.tmpl create mode 100644 scrapy-master/scrapy/templates/spiders/crawl.tmpl create mode 100644 scrapy-master/scrapy/templates/spiders/csvfeed.tmpl create mode 100644 scrapy-master/scrapy/templates/spiders/xmlfeed.tmpl create mode 100644 scrapy-master/scrapy/utils/__init__.py create mode 100644 scrapy-master/scrapy/utils/asyncgen.py create mode 100644 scrapy-master/scrapy/utils/benchserver.py create mode 100644 scrapy-master/scrapy/utils/boto.py create mode 100644 scrapy-master/scrapy/utils/conf.py create mode 100644 scrapy-master/scrapy/utils/console.py create mode 100644 scrapy-master/scrapy/utils/curl.py create mode 100644 scrapy-master/scrapy/utils/datatypes.py create mode 100644 scrapy-master/scrapy/utils/decorators.py create mode 100644 scrapy-master/scrapy/utils/defer.py create mode 100644 scrapy-master/scrapy/utils/deprecate.py create mode 100644 scrapy-master/scrapy/utils/display.py create mode 100644 scrapy-master/scrapy/utils/engine.py create mode 100644 scrapy-master/scrapy/utils/ftp.py create mode 100644 scrapy-master/scrapy/utils/gz.py create mode 100644 scrapy-master/scrapy/utils/httpobj.py create mode 100644 scrapy-master/scrapy/utils/iterators.py create mode 100644 scrapy-master/scrapy/utils/job.py create mode 100644 scrapy-master/scrapy/utils/log.py create mode 100644 scrapy-master/scrapy/utils/misc.py create mode 100644 scrapy-master/scrapy/utils/ossignal.py create mode 100644 scrapy-master/scrapy/utils/project.py create mode 100644 scrapy-master/scrapy/utils/python.py create mode 100644 scrapy-master/scrapy/utils/reactor.py create mode 100644 scrapy-master/scrapy/utils/reqser.py create mode 100644 scrapy-master/scrapy/utils/request.py create mode 100644 scrapy-master/scrapy/utils/response.py create mode 100644 scrapy-master/scrapy/utils/serialize.py create mode 100644 scrapy-master/scrapy/utils/signal.py create mode 100644 scrapy-master/scrapy/utils/sitemap.py create mode 100644 scrapy-master/scrapy/utils/spider.py create mode 100644 scrapy-master/scrapy/utils/ssl.py create mode 100644 scrapy-master/scrapy/utils/template.py create mode 100644 
scrapy-master/scrapy/utils/test.py create mode 100644 scrapy-master/scrapy/utils/testproc.py create mode 100644 scrapy-master/scrapy/utils/testsite.py create mode 100644 scrapy-master/scrapy/utils/trackref.py create mode 100644 scrapy-master/scrapy/utils/url.py create mode 100644 scrapy-master/scrapy/utils/versions.py create mode 100644 scrapy-master/sep/README.rst create mode 100644 scrapy-master/sep/sep-001.rst create mode 100644 scrapy-master/sep/sep-002.rst create mode 100644 scrapy-master/sep/sep-003.rst create mode 100644 scrapy-master/sep/sep-004.rst create mode 100644 scrapy-master/sep/sep-005.rst create mode 100644 scrapy-master/sep/sep-006.rst create mode 100644 scrapy-master/sep/sep-007.rst create mode 100644 scrapy-master/sep/sep-008.rst create mode 100644 scrapy-master/sep/sep-009.rst create mode 100644 scrapy-master/sep/sep-010.rst create mode 100644 scrapy-master/sep/sep-011.rst create mode 100644 scrapy-master/sep/sep-012.rst create mode 100644 scrapy-master/sep/sep-013.rst create mode 100644 scrapy-master/sep/sep-014.rst create mode 100644 scrapy-master/sep/sep-015.rst create mode 100644 scrapy-master/sep/sep-016.rst create mode 100644 scrapy-master/sep/sep-017.rst create mode 100644 scrapy-master/sep/sep-018.rst create mode 100644 scrapy-master/sep/sep-019.rst create mode 100644 scrapy-master/sep/sep-020.rst create mode 100644 scrapy-master/sep/sep-021.rst create mode 100644 scrapy-master/setup.cfg create mode 100644 scrapy-master/setup.py create mode 100644 scrapy-master/tests/CrawlerProcess/asyncio_custom_loop.py create mode 100644 scrapy-master/tests/CrawlerProcess/asyncio_deferred_signal.py create mode 100644 scrapy-master/tests/CrawlerProcess/asyncio_enabled_no_reactor.py create mode 100644 scrapy-master/tests/CrawlerProcess/asyncio_enabled_reactor.py create mode 100644 scrapy-master/tests/CrawlerProcess/asyncio_enabled_reactor_different_loop.py create mode 100644 scrapy-master/tests/CrawlerProcess/asyncio_enabled_reactor_same_loop.py create mode 100644 scrapy-master/tests/CrawlerProcess/caching_hostname_resolver.py create mode 100644 scrapy-master/tests/CrawlerProcess/caching_hostname_resolver_ipv6.py create mode 100644 scrapy-master/tests/CrawlerProcess/default_name_resolver.py create mode 100644 scrapy-master/tests/CrawlerProcess/multi.py create mode 100644 scrapy-master/tests/CrawlerProcess/reactor_default.py create mode 100644 scrapy-master/tests/CrawlerProcess/reactor_default_twisted_reactor_select.py create mode 100644 scrapy-master/tests/CrawlerProcess/reactor_select.py create mode 100644 scrapy-master/tests/CrawlerProcess/reactor_select_subclass_twisted_reactor_select.py create mode 100644 scrapy-master/tests/CrawlerProcess/reactor_select_twisted_reactor_select.py create mode 100644 scrapy-master/tests/CrawlerProcess/simple.py create mode 100644 scrapy-master/tests/CrawlerProcess/twisted_reactor_asyncio.py create mode 100644 scrapy-master/tests/CrawlerProcess/twisted_reactor_custom_settings.py create mode 100644 scrapy-master/tests/CrawlerProcess/twisted_reactor_custom_settings_conflict.py create mode 100644 scrapy-master/tests/CrawlerProcess/twisted_reactor_custom_settings_same.py create mode 100644 scrapy-master/tests/CrawlerProcess/twisted_reactor_poll.py create mode 100644 scrapy-master/tests/CrawlerProcess/twisted_reactor_select.py create mode 100644 scrapy-master/tests/CrawlerRunner/ip_address.py create mode 100644 scrapy-master/tests/__init__.py create mode 100644 scrapy-master/tests/ftpserver.py create mode 100644 scrapy-master/tests/ignores.txt 
create mode 100644 scrapy-master/tests/keys/__init__.py create mode 100644 scrapy-master/tests/keys/example-com.cert.pem create mode 100644 scrapy-master/tests/keys/example-com.conf create mode 100644 scrapy-master/tests/keys/example-com.gen.README create mode 100644 scrapy-master/tests/keys/example-com.key.pem create mode 100644 scrapy-master/tests/keys/localhost-ip.gen.README create mode 100644 scrapy-master/tests/keys/localhost.gen.README create mode 100644 scrapy-master/tests/keys/localhost.ip.crt create mode 100644 scrapy-master/tests/keys/localhost.ip.key create mode 100644 scrapy-master/tests/keys/mitmproxy-ca.pem create mode 100644 scrapy-master/tests/mocks/__init__.py create mode 100644 scrapy-master/tests/mocks/dummydbm.py create mode 100644 scrapy-master/tests/mockserver.py create mode 100644 scrapy-master/tests/pipelines.py create mode 100644 scrapy-master/tests/requirements.txt create mode 100644 scrapy-master/tests/sample_data/compressed/feed-sample1.tar create mode 100644 scrapy-master/tests/sample_data/compressed/feed-sample1.xml create mode 100644 scrapy-master/tests/sample_data/compressed/feed-sample1.xml.bz2 create mode 100644 scrapy-master/tests/sample_data/compressed/feed-sample1.xml.gz create mode 100644 scrapy-master/tests/sample_data/compressed/feed-sample1.zip create mode 100644 scrapy-master/tests/sample_data/compressed/html-br.bin create mode 100644 scrapy-master/tests/sample_data/compressed/html-gzip.bin create mode 100644 scrapy-master/tests/sample_data/compressed/html-rawdeflate.bin create mode 100644 scrapy-master/tests/sample_data/compressed/html-zlibdeflate.bin create mode 100644 scrapy-master/tests/sample_data/compressed/html-zstd-static-content-size.bin create mode 100644 scrapy-master/tests/sample_data/compressed/html-zstd-static-no-content-size.bin create mode 100644 scrapy-master/tests/sample_data/compressed/html-zstd-streaming-no-content-size.bin create mode 100644 scrapy-master/tests/sample_data/compressed/truncated-crc-error-short.gz create mode 100644 scrapy-master/tests/sample_data/compressed/truncated-crc-error.gz create mode 100644 scrapy-master/tests/sample_data/compressed/unexpected-eof-output.txt create mode 100644 scrapy-master/tests/sample_data/compressed/unexpected-eof.gz create mode 100644 scrapy-master/tests/sample_data/feeds/feed-sample1.xml create mode 100644 scrapy-master/tests/sample_data/feeds/feed-sample2.xml create mode 100644 scrapy-master/tests/sample_data/feeds/feed-sample3.csv create mode 100644 scrapy-master/tests/sample_data/feeds/feed-sample4.csv create mode 100644 scrapy-master/tests/sample_data/feeds/feed-sample5.csv create mode 100644 scrapy-master/tests/sample_data/feeds/feed-sample6.csv create mode 100644 scrapy-master/tests/sample_data/link_extractor/linkextractor.html create mode 100644 scrapy-master/tests/sample_data/link_extractor/linkextractor_latin1.html create mode 100644 scrapy-master/tests/sample_data/link_extractor/linkextractor_no_href.html create mode 100644 scrapy-master/tests/sample_data/link_extractor/linkextractor_noenc.html create mode 100644 scrapy-master/tests/sample_data/test_site/files/images/python-logo-master-v3-TM-flattened.png create mode 100644 scrapy-master/tests/sample_data/test_site/files/images/python-powered-h-50x65.png create mode 100644 scrapy-master/tests/sample_data/test_site/files/images/scrapy.png create mode 100644 scrapy-master/tests/sample_data/test_site/index.html create mode 100644 scrapy-master/tests/sample_data/test_site/item1.html create mode 100644 
scrapy-master/tests/sample_data/test_site/item2.html create mode 100644 scrapy-master/tests/spiders.py create mode 100644 scrapy-master/tests/test_closespider.py create mode 100644 scrapy-master/tests/test_cmdline/__init__.py create mode 100644 scrapy-master/tests/test_cmdline/extensions.py create mode 100644 scrapy-master/tests/test_cmdline/settings.py create mode 100644 scrapy-master/tests/test_cmdline_crawl_with_pipeline/__init__.py create mode 100644 scrapy-master/tests/test_cmdline_crawl_with_pipeline/scrapy.cfg create mode 100644 scrapy-master/tests/test_cmdline_crawl_with_pipeline/test_spider/__init__.py create mode 100644 scrapy-master/tests/test_cmdline_crawl_with_pipeline/test_spider/pipelines.py create mode 100644 scrapy-master/tests/test_cmdline_crawl_with_pipeline/test_spider/settings.py create mode 100644 scrapy-master/tests/test_cmdline_crawl_with_pipeline/test_spider/spiders/__init__.py create mode 100644 scrapy-master/tests/test_cmdline_crawl_with_pipeline/test_spider/spiders/exception.py create mode 100644 scrapy-master/tests/test_cmdline_crawl_with_pipeline/test_spider/spiders/normal.py create mode 100644 scrapy-master/tests/test_command_check.py create mode 100644 scrapy-master/tests/test_command_fetch.py create mode 100644 scrapy-master/tests/test_command_parse.py create mode 100644 scrapy-master/tests/test_command_shell.py create mode 100644 scrapy-master/tests/test_command_version.py create mode 100644 scrapy-master/tests/test_commands.py create mode 100644 scrapy-master/tests/test_contracts.py create mode 100644 scrapy-master/tests/test_core_downloader.py create mode 100644 scrapy-master/tests/test_crawl.py create mode 100644 scrapy-master/tests/test_crawler.py create mode 100644 scrapy-master/tests/test_dependencies.py create mode 100644 scrapy-master/tests/test_downloader_handlers.py create mode 100644 scrapy-master/tests/test_downloader_handlers_http2.py create mode 100644 scrapy-master/tests/test_downloadermiddleware.py create mode 100644 scrapy-master/tests/test_downloadermiddleware_ajaxcrawlable.py create mode 100644 scrapy-master/tests/test_downloadermiddleware_cookies.py create mode 100644 scrapy-master/tests/test_downloadermiddleware_decompression.py create mode 100644 scrapy-master/tests/test_downloadermiddleware_defaultheaders.py create mode 100644 scrapy-master/tests/test_downloadermiddleware_downloadtimeout.py create mode 100644 scrapy-master/tests/test_downloadermiddleware_httpauth.py create mode 100644 scrapy-master/tests/test_downloadermiddleware_httpcache.py create mode 100644 scrapy-master/tests/test_downloadermiddleware_httpcompression.py create mode 100644 scrapy-master/tests/test_downloadermiddleware_httpproxy.py create mode 100644 scrapy-master/tests/test_downloadermiddleware_redirect.py create mode 100644 scrapy-master/tests/test_downloadermiddleware_retry.py create mode 100644 scrapy-master/tests/test_downloadermiddleware_robotstxt.py create mode 100644 scrapy-master/tests/test_downloadermiddleware_stats.py create mode 100644 scrapy-master/tests/test_downloadermiddleware_useragent.py create mode 100644 scrapy-master/tests/test_downloaderslotssettings.py create mode 100644 scrapy-master/tests/test_dupefilters.py create mode 100644 scrapy-master/tests/test_engine.py create mode 100644 scrapy-master/tests/test_engine_stop_download_bytes.py create mode 100644 scrapy-master/tests/test_engine_stop_download_headers.py create mode 100644 scrapy-master/tests/test_exporters.py create mode 100644 scrapy-master/tests/test_extension_telnet.py create mode 
100644 scrapy-master/tests/test_feedexport.py create mode 100644 scrapy-master/tests/test_http2_client_protocol.py create mode 100644 scrapy-master/tests/test_http_cookies.py create mode 100644 scrapy-master/tests/test_http_headers.py create mode 100644 scrapy-master/tests/test_http_request.py create mode 100644 scrapy-master/tests/test_http_response.py create mode 100644 scrapy-master/tests/test_item.py create mode 100644 scrapy-master/tests/test_link.py create mode 100644 scrapy-master/tests/test_linkextractors.py create mode 100644 scrapy-master/tests/test_loader.py create mode 100644 scrapy-master/tests/test_loader_deprecated.py create mode 100644 scrapy-master/tests/test_logformatter.py create mode 100644 scrapy-master/tests/test_mail.py create mode 100644 scrapy-master/tests/test_middleware.py create mode 100644 scrapy-master/tests/test_pipeline_crawl.py create mode 100644 scrapy-master/tests/test_pipeline_files.py create mode 100644 scrapy-master/tests/test_pipeline_images.py create mode 100644 scrapy-master/tests/test_pipeline_media.py create mode 100644 scrapy-master/tests/test_pipelines.py create mode 100644 scrapy-master/tests/test_pqueues.py create mode 100644 scrapy-master/tests/test_proxy_connect.py create mode 100644 scrapy-master/tests/test_request_attribute_binding.py create mode 100644 scrapy-master/tests/test_request_cb_kwargs.py create mode 100644 scrapy-master/tests/test_request_dict.py create mode 100644 scrapy-master/tests/test_request_left.py create mode 100644 scrapy-master/tests/test_responsetypes.py create mode 100644 scrapy-master/tests/test_robotstxt_interface.py create mode 100644 scrapy-master/tests/test_scheduler.py create mode 100644 scrapy-master/tests/test_scheduler_base.py create mode 100644 scrapy-master/tests/test_selector.py create mode 100644 scrapy-master/tests/test_settings/__init__.py create mode 100644 scrapy-master/tests/test_settings/default_settings.py create mode 100644 scrapy-master/tests/test_signals.py create mode 100644 scrapy-master/tests/test_spider.py create mode 100644 scrapy-master/tests/test_spiderloader/__init__.py create mode 100644 scrapy-master/tests/test_spiderloader/test_spiders/__init__.py create mode 100644 scrapy-master/tests/test_spiderloader/test_spiders/nested/__init__.py create mode 100644 scrapy-master/tests/test_spiderloader/test_spiders/nested/spider4.py create mode 100644 scrapy-master/tests/test_spiderloader/test_spiders/spider0.py create mode 100644 scrapy-master/tests/test_spiderloader/test_spiders/spider1.py create mode 100644 scrapy-master/tests/test_spiderloader/test_spiders/spider2.py create mode 100644 scrapy-master/tests/test_spiderloader/test_spiders/spider3.py create mode 100644 scrapy-master/tests/test_spidermiddleware.py create mode 100644 scrapy-master/tests/test_spidermiddleware_depth.py create mode 100644 scrapy-master/tests/test_spidermiddleware_httperror.py create mode 100644 scrapy-master/tests/test_spidermiddleware_offsite.py create mode 100644 scrapy-master/tests/test_spidermiddleware_output_chain.py create mode 100644 scrapy-master/tests/test_spidermiddleware_referer.py create mode 100644 scrapy-master/tests/test_spidermiddleware_urllength.py create mode 100644 scrapy-master/tests/test_spiderstate.py create mode 100644 scrapy-master/tests/test_squeues.py create mode 100644 scrapy-master/tests/test_squeues_request.py create mode 100644 scrapy-master/tests/test_stats.py create mode 100644 scrapy-master/tests/test_toplevel.py create mode 100644 scrapy-master/tests/test_urlparse_monkeypatches.py 
create mode 100644 scrapy-master/tests/test_utils_asyncgen.py create mode 100644 scrapy-master/tests/test_utils_asyncio.py create mode 100644 scrapy-master/tests/test_utils_conf.py create mode 100644 scrapy-master/tests/test_utils_console.py create mode 100644 scrapy-master/tests/test_utils_curl.py create mode 100644 scrapy-master/tests/test_utils_datatypes.py create mode 100644 scrapy-master/tests/test_utils_defer.py create mode 100644 scrapy-master/tests/test_utils_deprecate.py create mode 100644 scrapy-master/tests/test_utils_display.py create mode 100644 scrapy-master/tests/test_utils_gz.py create mode 100644 scrapy-master/tests/test_utils_httpobj.py create mode 100644 scrapy-master/tests/test_utils_iterators.py create mode 100644 scrapy-master/tests/test_utils_log.py create mode 100644 scrapy-master/tests/test_utils_misc/__init__.py create mode 100644 scrapy-master/tests/test_utils_misc/test.egg create mode 100644 scrapy-master/tests/test_utils_misc/test_return_with_argument_inside_generator.py create mode 100644 scrapy-master/tests/test_utils_misc/test_walk_modules/__init__.py create mode 100644 scrapy-master/tests/test_utils_misc/test_walk_modules/mod/__init__.py create mode 100644 scrapy-master/tests/test_utils_misc/test_walk_modules/mod/mod0.py create mode 100644 scrapy-master/tests/test_utils_misc/test_walk_modules/mod1.py create mode 100644 scrapy-master/tests/test_utils_project.py create mode 100644 scrapy-master/tests/test_utils_python.py create mode 100644 scrapy-master/tests/test_utils_request.py create mode 100644 scrapy-master/tests/test_utils_response.py create mode 100644 scrapy-master/tests/test_utils_serialize.py create mode 100644 scrapy-master/tests/test_utils_signal.py create mode 100644 scrapy-master/tests/test_utils_sitemap.py create mode 100644 scrapy-master/tests/test_utils_spider.py create mode 100644 scrapy-master/tests/test_utils_template.py create mode 100644 scrapy-master/tests/test_utils_trackref.py create mode 100644 scrapy-master/tests/test_utils_url.py create mode 100644 scrapy-master/tests/test_webclient.py create mode 100644 scrapy-master/tests/upper-constraints.txt create mode 100644 scrapy-master/tox.ini diff --git a/scrapy-master/.bandit.yml b/scrapy-master/.bandit.yml new file mode 100644 index 0000000..c8e84cc --- /dev/null +++ b/scrapy-master/.bandit.yml @@ -0,0 +1,20 @@ +skips: +- B101 +- B105 +- B301 +- B303 +- B306 +- B307 +- B311 +- B320 +- B321 +- B324 +- B402 # https://github.com/scrapy/scrapy/issues/4180 +- B403 +- B404 +- B406 +- B410 +- B503 +- B603 +- B605 +exclude_dirs: ['tests'] diff --git a/scrapy-master/.bumpversion.cfg b/scrapy-master/.bumpversion.cfg new file mode 100644 index 0000000..4cfba67 --- /dev/null +++ b/scrapy-master/.bumpversion.cfg @@ -0,0 +1,7 @@ +[bumpversion] +current_version = 2.8.0 +commit = True +tag = True +tag_name = {new_version} + +[bumpversion:file:scrapy/VERSION] diff --git a/scrapy-master/.coveragerc b/scrapy-master/.coveragerc new file mode 100644 index 0000000..ad0ee0f --- /dev/null +++ b/scrapy-master/.coveragerc @@ -0,0 +1,6 @@ +[run] +branch = true +include = scrapy/* +omit = + tests/* +disable_warnings = include-ignored diff --git a/scrapy-master/.flake8 b/scrapy-master/.flake8 new file mode 100644 index 0000000..544d729 --- /dev/null +++ b/scrapy-master/.flake8 @@ -0,0 +1,22 @@ +[flake8] + +max-line-length = 119 +ignore = W503, E203 + +exclude = + docs/conf.py + +per-file-ignores = +# Exclude files that are meant to provide top-level imports +# E402: Module level import not at top of file +# F401: 
Module imported but unused + scrapy/__init__.py:E402 + scrapy/core/downloader/handlers/http.py:F401 + scrapy/http/__init__.py:F401 + scrapy/linkextractors/__init__.py:E402,F401 + scrapy/selector/__init__.py:F401 + scrapy/spiders/__init__.py:E402,F401 + + # Issues pending a review: + scrapy/utils/url.py:F403,F405 + tests/test_loader.py:E741 diff --git a/scrapy-master/.git-blame-ignore-revs b/scrapy-master/.git-blame-ignore-revs new file mode 100644 index 0000000..dbcebfa --- /dev/null +++ b/scrapy-master/.git-blame-ignore-revs @@ -0,0 +1,7 @@ +# .git-blame-ignore-revs +# adding black formatter to all the code +e211ec0aa26ecae0da8ae55d064ea60e1efe4d0d +# re applying black to the code with default line length +303f0a70fcf8067adf0a909c2096a5009162383a +# reaplying black again and removing line length on pre-commit black config +c5cdd0d30ceb68ccba04af0e71d1b8e6678e2962 \ No newline at end of file diff --git a/scrapy-master/.gitattributes b/scrapy-master/.gitattributes new file mode 100644 index 0000000..dfbdf42 --- /dev/null +++ b/scrapy-master/.gitattributes @@ -0,0 +1 @@ +tests/sample_data/** binary diff --git a/scrapy-master/.github/ISSUE_TEMPLATE/bug_report.md b/scrapy-master/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..8ca1010 --- /dev/null +++ b/scrapy-master/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,41 @@ +--- +name: Bug report +about: Report a problem to help us improve +--- + + + +### Description + +[Description of the issue] + +### Steps to Reproduce + +1. [First Step] +2. [Second Step] +3. [and so on...] + +**Expected behavior:** [What you expect to happen] + +**Actual behavior:** [What actually happens] + +**Reproduces how often:** [What percentage of the time does it reproduce?] + +### Versions + +Please paste here the output of executing `scrapy version --verbose` in the command line. + +### Additional context + +Any additional information, configuration, data or output from commands that might be necessary to reproduce or understand the issue. Please try not to include screenshots of code or the command line, paste the contents as text instead. You can use [GitHub Flavored Markdown](https://help.github.com/en/articles/creating-and-highlighting-code-blocks) to make the text look better. diff --git a/scrapy-master/.github/ISSUE_TEMPLATE/feature_request.md b/scrapy-master/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..e05273f --- /dev/null +++ b/scrapy-master/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,33 @@ +--- +name: Feature request +about: Suggest an idea for an enhancement or new feature +--- + + + +## Summary + +One paragraph explanation of the feature. + +## Motivation + +Why are we doing this? What use cases does it support? What is the expected outcome? + +## Describe alternatives you've considered + +A clear and concise description of the alternative solutions you've considered. Be sure to explain why Scrapy's existing customizability isn't suitable for this feature. + +## Additional context + +Any additional information about the feature request here. 
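
As an aside on the release configuration above: the .bumpversion.cfg added here records current_version = 2.8.0 and is set up to rewrite scrapy/VERSION on each bump (the [bumpversion:file:scrapy/VERSION] stanza). A minimal sketch, not part of the patch, of checking that the two stay in sync; the repository-root path and the check itself are illustrative assumptions:

    import configparser
    from pathlib import Path

    # Illustrative only -- not part of this patch. Confirms that the version
    # recorded in .bumpversion.cfg matches the scrapy/VERSION file it rewrites.
    # The default repo_root mirrors the "scrapy-master/" prefix used in this diff.
    def versions_in_sync(repo_root: Path = Path("scrapy-master")) -> bool:
        cfg = configparser.ConfigParser()
        cfg.read(repo_root / ".bumpversion.cfg")
        configured = cfg["bumpversion"]["current_version"]   # "2.8.0" in this patch
        on_disk = (repo_root / "scrapy" / "VERSION").read_text().strip()
        return configured == on_disk

    if __name__ == "__main__":
        print("in sync" if versions_in_sync() else "out of sync")
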
diff --git a/scrapy-master/.github/workflows/checks.yml b/scrapy-master/.github/workflows/checks.yml new file mode 100644 index 0000000..ee0cb4b --- /dev/null +++ b/scrapy-master/.github/workflows/checks.yml @@ -0,0 +1,42 @@ +name: Checks +on: [push, pull_request] + +jobs: + checks: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - python-version: "3.11" + env: + TOXENV: pylint + - python-version: 3.8 + env: + TOXENV: typing + - python-version: "3.11" # Keep in sync with .readthedocs.yml + env: + TOXENV: docs + - python-version: "3.11" + env: + TOXENV: twinecheck + + steps: + - uses: actions/checkout@v3 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Run check + env: ${{ matrix.env }} + run: | + pip install -U tox + tox + + pre-commit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: pre-commit/action@v3.0.0 diff --git a/scrapy-master/.github/workflows/publish.yml b/scrapy-master/.github/workflows/publish.yml new file mode 100644 index 0000000..22b8996 --- /dev/null +++ b/scrapy-master/.github/workflows/publish.yml @@ -0,0 +1,21 @@ +name: Publish +on: + push: + tags: + - '[0-9]+.[0-9]+.[0-9]+' + +jobs: + publish: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: 3.11 + - run: | + pip install --upgrade build twine + python -m build + - name: Publish to PyPI + uses: pypa/gh-action-pypi-publish@v1.6.4 + with: + password: ${{ secrets.PYPI_TOKEN }} diff --git a/scrapy-master/.github/workflows/tests-macos.yml b/scrapy-master/.github/workflows/tests-macos.yml new file mode 100644 index 0000000..174d245 --- /dev/null +++ b/scrapy-master/.github/workflows/tests-macos.yml @@ -0,0 +1,26 @@ +name: macOS +on: [push, pull_request] + +jobs: + tests: + runs-on: macos-11 + strategy: + fail-fast: false + matrix: + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] + + steps: + - uses: actions/checkout@v3 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Run tests + run: | + pip install -U tox + tox -e py + + - name: Upload coverage report + run: bash <(curl -s https://codecov.io/bash) diff --git a/scrapy-master/.github/workflows/tests-ubuntu.yml b/scrapy-master/.github/workflows/tests-ubuntu.yml new file mode 100644 index 0000000..8fcf90a --- /dev/null +++ b/scrapy-master/.github/workflows/tests-ubuntu.yml @@ -0,0 +1,66 @@ +name: Ubuntu +on: [push, pull_request] + +jobs: + tests: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + include: + - python-version: 3.8 + env: + TOXENV: py + - python-version: 3.9 + env: + TOXENV: py + - python-version: "3.10" + env: + TOXENV: py + - python-version: "3.11" + env: + TOXENV: py + - python-version: "3.11" + env: + TOXENV: asyncio + - python-version: pypy3.9 + env: + TOXENV: pypy3 + + # pinned deps + - python-version: 3.7.13 + env: + TOXENV: pinned + - python-version: 3.7.13 + env: + TOXENV: asyncio-pinned + - python-version: pypy3.7 + env: + TOXENV: pypy3-pinned + + - python-version: "3.11" + env: + TOXENV: extra-deps + + steps: + - uses: actions/checkout@v3 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Install system libraries + if: matrix.python-version == 'pypy3.9' || contains(matrix.env.TOXENV, 'pinned') + 
run: | + sudo apt-get update + sudo apt-get install libxml2-dev libxslt-dev + + - name: Run tests + env: ${{ matrix.env }} + run: | + pip install -U tox + tox + + - name: Upload coverage report + run: bash <(curl -s https://codecov.io/bash) diff --git a/scrapy-master/.github/workflows/tests-windows.yml b/scrapy-master/.github/workflows/tests-windows.yml new file mode 100644 index 0000000..f60c488 --- /dev/null +++ b/scrapy-master/.github/workflows/tests-windows.yml @@ -0,0 +1,46 @@ +name: Windows +on: [push, pull_request] + +jobs: + tests: + runs-on: windows-latest + strategy: + fail-fast: false + matrix: + include: + - python-version: 3.7 + env: + TOXENV: windows-pinned + - python-version: 3.8 + env: + TOXENV: py + - python-version: 3.9 + env: + TOXENV: py + - python-version: "3.10" + env: + TOXENV: py + - python-version: "3.10" + env: + TOXENV: asyncio +# no binary package for lxml for 3.11 yet +# - python-version: "3.11" +# env: +# TOXENV: py +# - python-version: "3.11" +# env: +# TOXENV: asyncio + + steps: + - uses: actions/checkout@v3 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Run tests + env: ${{ matrix.env }} + run: | + pip install -U tox + tox diff --git a/scrapy-master/.gitignore b/scrapy-master/.gitignore new file mode 100644 index 0000000..6c5c50e --- /dev/null +++ b/scrapy-master/.gitignore @@ -0,0 +1,28 @@ +/.vagrant +/scrapy.iml +*.pyc +_trial_temp* +dropin.cache +docs/build +*egg-info +.tox +venv +build +dist +.idea +htmlcov/ +.coverage +.pytest_cache/ +.coverage.* +coverage.* +test-output.* +.cache/ +.mypy_cache/ +/tests/keys/localhost.crt +/tests/keys/localhost.key + +# Windows +Thumbs.db + +# OSX miscellaneous +.DS_Store \ No newline at end of file diff --git a/scrapy-master/.isort.cfg b/scrapy-master/.isort.cfg new file mode 100644 index 0000000..f238bf7 --- /dev/null +++ b/scrapy-master/.isort.cfg @@ -0,0 +1,2 @@ +[settings] +profile = black diff --git a/scrapy-master/.pre-commit-config.yaml b/scrapy-master/.pre-commit-config.yaml new file mode 100644 index 0000000..4b90233 --- /dev/null +++ b/scrapy-master/.pre-commit-config.yaml @@ -0,0 +1,24 @@ +repos: +- repo: https://github.com/PyCQA/bandit + rev: 1.7.4 + hooks: + - id: bandit + args: [-r, -c, .bandit.yml] +- repo: https://github.com/PyCQA/flake8 + rev: 5.0.4 # 6.0.0 drops Python 3.7 support + hooks: + - id: flake8 +- repo: https://github.com/psf/black.git + rev: 23.1.0 + hooks: + - id: black +- repo: https://github.com/pycqa/isort + rev: 5.11.5 # 5.12 drops Python 3.7 support + hooks: + - id: isort +- repo: https://github.com/adamchainz/blacken-docs + rev: 1.13.0 + hooks: + - id: blacken-docs + additional_dependencies: + - black==23.1.0 diff --git a/scrapy-master/.readthedocs.yml b/scrapy-master/.readthedocs.yml new file mode 100644 index 0000000..e71d34f --- /dev/null +++ b/scrapy-master/.readthedocs.yml @@ -0,0 +1,17 @@ +version: 2 +formats: all +sphinx: + configuration: docs/conf.py + fail_on_warning: true + +build: + os: ubuntu-20.04 + tools: + # For available versions, see: + # https://docs.readthedocs.io/en/stable/config-file/v2.html#build-tools-python + python: "3.11" # Keep in sync with .github/workflows/checks.yml + +python: + install: + - requirements: docs/requirements.txt + - path: . 
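
The workflow files above all drive linting and tests through tox: each job installs tox and runs it with a single TOXENV from the matrix, while .pre-commit-config.yaml covers the bandit/flake8/black/isort hooks. As a sketch only of a rough local equivalent of the checks.yml matrix (the TOXENV names are taken from that workflow; the script itself is hypothetical and assumes tox and the project's tox.ini are available):

    import os
    import subprocess

    # Illustrative only -- not part of this patch. Runs the same TOXENV values
    # that checks.yml spreads across its job matrix, one after another.
    CHECK_ENVS = ["pylint", "typing", "docs", "twinecheck"]

    def run_checks() -> None:
        for toxenv in CHECK_ENVS:
            print(f"== tox (TOXENV={toxenv}) ==")
            # Mirrors the workflow step; `pip install -U tox` is assumed done once.
            subprocess.run(["tox"], env=dict(os.environ, TOXENV=toxenv), check=True)

    if __name__ == "__main__":
        run_checks()
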
diff --git a/scrapy-master/AUTHORS b/scrapy-master/AUTHORS new file mode 100644 index 0000000..9706adf --- /dev/null +++ b/scrapy-master/AUTHORS @@ -0,0 +1,58 @@ +Scrapy was brought to life by Shane Evans while hacking a scraping framework +prototype for Mydeco (mydeco.com). It soon became maintained, extended and +improved by Insophia (insophia.com), with the initial sponsorship of Mydeco to +bootstrap the project. In mid-2011, Scrapinghub (now Zyte) became the new +official maintainer. + +Here is the list of the primary authors & contributors: + + * Pablo Hoffman + * Daniel Graña + * Martin Olveyra + * Gabriel García + * Michael Cetrulo + * Artem Bogomyagkov + * Damian Canabal + * Andres Moreira + * Ismael Carnales + * Matías Aguirre + * German Hoffmann + * Anibal Pacheco + * Bruno Deferrari + * Shane Evans + * Ezequiel Rivero + * Patrick Mezard + * Rolando Espinoza + * Ping Yin + * Lucian Ursu + * Shuaib Khan + * Didier Deshommes + * Vikas Dhiman + * Jochen Maes + * Darian Moody + * Jordi Lonch + * Zuhao Wan + * Steven Almeroth + * Tom Mortimer-Jones + * Chris Tilden + * Alexandr N Zamaraev + * Emanuel Schorsch + * Michal Danilak + * Natan Lao + * Hasnain Lakhani + * Pedro Faustino + * Alex Cepoi + * Ilya Baryshev + * Libor Nenadál + * Jae-Myoung Yu + * Vladislav Poluhin + * Marc Abramowitz + * Valentin-Costel Hăloiu + * Jason Yeo + * Сергей Прохоров + * Simon Ratne + * Julien Duponchelle + * Jochen Maes + * Vikas Dhiman + * Juan Picca + * Nicolás Ramírez diff --git a/scrapy-master/CODE_OF_CONDUCT.md b/scrapy-master/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..3c8e4d1 --- /dev/null +++ b/scrapy-master/CODE_OF_CONDUCT.md @@ -0,0 +1,133 @@ + +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. 
+ +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +opensource@zyte.com. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. 
+ +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/scrapy-master/CONTRIBUTING.md b/scrapy-master/CONTRIBUTING.md new file mode 100644 index 0000000..a05d07a --- /dev/null +++ b/scrapy-master/CONTRIBUTING.md @@ -0,0 +1,6 @@ +The guidelines for contributing are available here: +https://docs.scrapy.org/en/master/contributing.html + +Please do not abuse the issue tracker for support questions. +If your issue topic can be rephrased to "How to ...?", please use the +support channels to get it answered: https://scrapy.org/community/ diff --git a/scrapy-master/INSTALL.md b/scrapy-master/INSTALL.md new file mode 100644 index 0000000..495413f --- /dev/null +++ b/scrapy-master/INSTALL.md @@ -0,0 +1,4 @@ +For information about installing Scrapy see: + +* [Local docs](docs/intro/install.rst) +* [Online docs](https://docs.scrapy.org/en/latest/intro/install.html) diff --git a/scrapy-master/LICENSE b/scrapy-master/LICENSE new file mode 100644 index 0000000..4d0a086 --- /dev/null +++ b/scrapy-master/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) Scrapy developers. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions, and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions, and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of Scrapy nor the names of its contributors may be used + to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/scrapy-master/MANIFEST.in b/scrapy-master/MANIFEST.in new file mode 100644 index 0000000..ae7db51 --- /dev/null +++ b/scrapy-master/MANIFEST.in @@ -0,0 +1,26 @@ +include README.rst +include AUTHORS +include INSTALL +include LICENSE +include MANIFEST.in +include NEWS + +include scrapy/VERSION +include scrapy/mime.types + +include codecov.yml +include conftest.py +include pytest.ini +include requirements-*.txt +include tox.ini + +recursive-include scrapy/templates * +recursive-include scrapy license.txt +recursive-include docs * +prune docs/build + +recursive-include extras * +recursive-include bin * +recursive-include tests * + +global-exclude __pycache__ *.py[cod] diff --git a/scrapy-master/NEWS b/scrapy-master/NEWS new file mode 100644 index 0000000..e63845e --- /dev/null +++ b/scrapy-master/NEWS @@ -0,0 +1 @@ +See docs/news.rst diff --git a/scrapy-master/README.rst b/scrapy-master/README.rst new file mode 100644 index 0000000..970bf2c --- /dev/null +++ b/scrapy-master/README.rst @@ -0,0 +1,113 @@ +.. image:: https://scrapy.org/img/scrapylogo.png + :target: https://scrapy.org/ + +====== +Scrapy +====== + +.. image:: https://img.shields.io/pypi/v/Scrapy.svg + :target: https://pypi.python.org/pypi/Scrapy + :alt: PyPI Version + +.. image:: https://img.shields.io/pypi/pyversions/Scrapy.svg + :target: https://pypi.python.org/pypi/Scrapy + :alt: Supported Python Versions + +.. image:: https://github.com/scrapy/scrapy/workflows/Ubuntu/badge.svg + :target: https://github.com/scrapy/scrapy/actions?query=workflow%3AUbuntu + :alt: Ubuntu + +.. image:: https://github.com/scrapy/scrapy/workflows/macOS/badge.svg + :target: https://github.com/scrapy/scrapy/actions?query=workflow%3AmacOS + :alt: macOS + +.. image:: https://github.com/scrapy/scrapy/workflows/Windows/badge.svg + :target: https://github.com/scrapy/scrapy/actions?query=workflow%3AWindows + :alt: Windows + +.. image:: https://img.shields.io/badge/wheel-yes-brightgreen.svg + :target: https://pypi.python.org/pypi/Scrapy + :alt: Wheel Status + +.. image:: https://img.shields.io/codecov/c/github/scrapy/scrapy/master.svg + :target: https://codecov.io/github/scrapy/scrapy?branch=master + :alt: Coverage report + +.. image:: https://anaconda.org/conda-forge/scrapy/badges/version.svg + :target: https://anaconda.org/conda-forge/scrapy + :alt: Conda Version + + +Overview +======== + +Scrapy is a fast high-level web crawling and web scraping framework, used to +crawl websites and extract structured data from their pages. It can be used for +a wide range of purposes, from data mining to monitoring and automated testing. + +Scrapy is maintained by Zyte_ (formerly Scrapinghub) and `many other +contributors`_. + +.. _many other contributors: https://github.com/scrapy/scrapy/graphs/contributors +.. _Zyte: https://www.zyte.com/ + +Check the Scrapy homepage at https://scrapy.org for more information, +including a list of features. + + +Requirements +============ + +* Python 3.7+ +* Works on Linux, Windows, macOS, BSD + +Install +======= + +The quick way: + +.. code:: bash + + pip install scrapy + +See the install section in the documentation at +https://docs.scrapy.org/en/latest/intro/install.html for more details. + +Documentation +============= + +Documentation is available online at https://docs.scrapy.org/ and in the ``docs`` +directory. + +Releases +======== + +You can check https://docs.scrapy.org/en/latest/news.html for the release notes. 
+ +Community (blog, twitter, mail list, IRC) +========================================= + +See https://scrapy.org/community/ for details. + +Contributing +============ + +See https://docs.scrapy.org/en/master/contributing.html for details. + +Code of Conduct +--------------- + +Please note that this project is released with a Contributor `Code of Conduct `_. + +By participating in this project you agree to abide by its terms. +Please report unacceptable behavior to opensource@zyte.com. + +Companies using Scrapy +====================== + +See https://scrapy.org/companies/ for a list. + +Commercial Support +================== + +See https://scrapy.org/support/ for details. diff --git a/scrapy-master/artwork/README.rst b/scrapy-master/artwork/README.rst new file mode 100644 index 0000000..c1880ef --- /dev/null +++ b/scrapy-master/artwork/README.rst @@ -0,0 +1,20 @@ +============== +Scrapy artwork +============== + +This folder contains the Scrapy artwork resources such as logos and fonts. + +scrapy-logo.jpg +--------------- + +The main Scrapy logo, in JPEG format. + +qlassik.zip +----------- + +The font used for the Scrapy logo. Homepage: https://www.dafont.com/qlassik.font + +scrapy-blog.logo.xcf +-------------------- + +The logo used in the Scrapy blog, in Gimp format. diff --git a/scrapy-master/artwork/qlassik.zip b/scrapy-master/artwork/qlassik.zip new file mode 100644 index 0000000000000000000000000000000000000000..2885c06ef4bab2fd9027bf748bd5ad2a69eb857f GIT binary patch literal 120204 zcmV(+K;6GkO9KQH000080H&bGI*MgI(|wQt0Pr0F01f~E08wmVb8~5HUsOUabaZCy zy?LA@MVU7q5qaO0m06W_SJvHKUENh(-Cfo9HQjUH1H%l*a1Q4TH;5M^iU%U#feN0i zB7(Z2;_HYgq9U&Ax*m8dBI1h2x+-|X)cZtaRZq`=``gd&^ZvO*=Tli(RT=R-@f_dh ziHHPYgir_wkI=g9+jf2LW50Mk!l@?^!s@nN_2!3ub^U%W*@qDl zpF~J_?979wUp)QD+@BEAzl@N)^~@_T_mK^nF7>3W^jL=`^&)t9eS^qWhid*3`9lqZ>7cPje;ywlMm*9Qz+=G{2m3?3R9(YgN zLl^A7^g`iD;VOjfJ#ejj;G#26-}jWi7Gd)dXztpBr(bn3w-f&nK06nF?_YTO!Tn#o z|K{H!9KHsAFI{}mWtR{C$Nm<=9`GaVz4+4o7au}s0Qz$Sq7tC8z-V#J)B2OATGM|- z3il-C=1V*Gd?0xJMXU3f4&v_P-UM$IL{RV#en)=W*^M&XkKyxW?ml*>RsZljy#n;* zNb3Cp{;BXgkPAEK@V)qHU=or0$@TF5hTs+d7&Xz~WJ{2G_qqm=k8DT&#@_pjUwA3I z!{14_4{yTAM>xWo`7n71UVLcgXWg&=EI9D}CdHx2_?{{Q4Wuz88`~~^Jo}9 zg&OFSNWn!^fY%bA70DoKkZHK*ooJlQ zqda~HK6@4P`zy#GeW;CpjSP-MQT%(P;jh8*E$HLf=mNYJ#o;xDKMB&Ne7z2?KZM5d z#mL7G!*L@V5@`J$NCIBl=qqRxeXa8ud;@Bse?e_>C#vHw!trU=Rs(+-zF&j)C&K#~ zXzyV(3Kn6LQ`tQ%{A#4)^FS6&bP0M84FI1T@SmZ-_ahCx1=`;Y#~0yq~t2dZ*B$n+AFhI_a1*HMYw3;q5*jM<|wPIOFefHvssQ)mlW z1Ub7%eZGT^6@EF4)2ai;sf7N$^E)`6fa4F{G4i`du6ItyiH_0ggN;#doL=`l$Lry^ z0)BhL(FgD`guc@GBOJei$`vo4`iO%x(g?Pf(dNnOuT$OlLtJ@V-fs$iN?k@jnUlgxVO@b$Y-# z`6lpAZF~@H4GwBwqBrKdYld4 ze+K%)gOB+*yr(wzMKr?fj@s*iZu<{|%&DzC%>2d#IT6}E8P4B}A|Uf|kjXgc_XNmt zg0*)kdK;dEdzC;|4}$(spNr`~q@%yc9V8>U^DOvq1*nQtpc>ME>Oh}C2GW6=$N*{~ z6KDuooxdX+g@A^U4b(wlpe}Mce?t-E0*#^wP!C0c#(@46#gPXzfnq?DC=N7*5}m(* zZ6<+cPzq=krGfe=)A=*>H48M4e4quC16l<7C$PCZ&@w6jt)L>%K2!pW=tpIsRa5~w zfciR5qZ;Z5I*6)3htL4fI;wT3H>Y$M4FYYVA)q6u-uVM)Q3L208V1@zO`vTw()m3a zN25R|&=}B3)B-vM^mk|)wSmr{aiFtk0_Yr??EDv+M^iu-&@|9RGy`-En(h1+EulG} z%V-|xTC@Ok9nk+o>(L_64QLI}jc5tzCbZo74cd&>0^Nev0o{t$1Kox;bbgJtqm4j! 
[remaining GIT binary patch data for scrapy-master/artwork/qlassik.zip (base85-encoded, 120204 bytes) omitted]
zZZ>Q)Bg?j_ahwznvrk*xc4R4g(gpxc#iVQlCdGKi3_g`#zsyR0f~^2pRflTSAIT8Q zPbHs5r?Q-rRuky_dRph++lXesP=QGzAh<{(61W4}cnIBH`c<)%Sy`z7d3O}yXcE{4 zadMF_?|RXkCA^0^E5N*lCh&4xvC=-IQ6tWf0?1tjamd6h`CyWYT~Ta`fa`I>O8~JA z0J(f+Y9U=1lF3m)5e3L@AglvqmxEepf#Cp6K(fDJH)Y9m<`yRPL+71U6X@hK0LAj0 znX{i=SSS+Hicv&IIJa!$*7}NNMD~t*JiZ%bdL?=;v-dOl_I^3o`xy~BiwXzKq;jiE zDOL@>%4T>&C<>ygIGv9jMN1m>qughzJHWsL#i-J^rAX=F33Xb3tQE!R9`F5rJPaWAz|6(N#nX3?#+a(aY`A=?i9)TqNtk`YULWaP*Kes*ruaWyTI`>1RP zg5-^FZQVY+DJ_8WPNe?FjLW0Bgnj%QR#F#4j_J)v?+Nr4av$>hqoA}}qYXw!7`Sam zra)U=Tb&qmyU3ZG>J?JZJY}MI*5Y7w8GcaD9QwhhemCqJXDXFfvnl4<2wmQvo#}Q@|L1OV(3;1XIG~+ z-2_-`wC6e|z9=>7raeg$P6Yc0<3oYz74GotXPd9n#$R+*{_ zb}FkJ530trVodXWI$MtB`&4_k(MavG z$-xxDMT{GmQ}ju#6AfwxbR2DVkiWCjQpgR=F={vcb=SeSxYr4rHf^GRMv3b4oOE-uidkW;SK1ff1*VNcz^`9#nQ(&i%(QDZ3js`Y&{H|zj@e}y9_{XTj z@OPy~-=<6DHvr>eesy>>4mhq6~SiTLmEDBmqkcjCBR%qI}SYx)J zLFhKTLQ)Iyt9*w${zdRGkT>9=!!!R9HCKIi)8ay=HSZ)~2mD?YS=&M)k>@%!*k!R}_6Cr&j6V+!}78l6iUjdQa& zzoMAk4jnrP)~wKofB>B~*ImWHPKaMC=Hd>mQUDINWz19AhgLRLSwER+4yC74lUcIf zE{E=T-5;{F;>URa3e771kDzSHkOLTUTmw$E_syPlO+K)kI}BI-i!*l8{7D-;$8uW=@9V>J(j!mXY z>sz_;YA|0>;>z}&f^;D<)4Dg5bFEa_b*kDsKs1GY%}LElbWdNm_31@7D`1?n%1KF9 z!KwR=t25&Q^hFqEwKb&d7)9iFM`49g3!?hGSP^ydnfi$Ky2ORLtb~>G&s* z?1h>d(vlD1I3?xkz9HJO=BuerzT7TDJlAr%g6SMgk`0`!39wSn>h5@JegnfBCfHSz zoDMN*usWR1X{dy3Y2aWegW$W(NGj1QRVeJU8g@x z#0F~U%(_T@qHc_HwY6Gyry#0|leB_iou|uF{)$%{pULZxI*DS!6-DBC_!G`>^#l2# zKqipaOgo;ctoS$0B!;3{;NM7MbaD7gnR-mHR6^Idsl7kp=i-OZGFx*+D-qR3v{s4E zHJSlyC8&;OqL|p%{+0hLl~S5S_RFM2sU;3dhpoXX{}sjjcS$A^PiGW>nW%!SYjQde zpmVY%yCW$j$<`-}#Ln{0a0c_1WniJ+fRvFF>3D%;exaP!GG=a5-YJ@H%@KIWW59j_ z2dAb|8*baB(A>k+=AM6sGCnyh*rp8V+c%u5o0(<({Ta}OKJf9tEX3o&K!=$7N5Xx0UbRAGKp#|J63$* zV65>zWJ6RLiN-<(n63Z6&r((p8l-+T>IC~^ilq>Ij2BI14p_zW1Pxyii^iH7lwBq6S>WLyqm&IjrCu@XyhQ8k2|%5J1n`8KrUFW4a5@V zxLU7=4}<;BvoRC8{)o<5SdFc81yYgq{XRf0#Q_%}xX{{fe-%zNS^~j_f&gh{aBYP{ z5RK_N9jZv}SS|f|I`P>WYP8dCGvQYAJ`dU-cP1JW7gTwbPEnHMYVFk0nUMB*xsWqV z{Cyr$44E!&lwxLf`>AJ+V_N&x5^|o#Bu6Mz2Oz>v7}`~y8rNny&`&mGeCNS zcuG-fKywC%&bN9%v;d+|JfHYa04Ut|M4CVoRP60QZvw0~*r$b#IBJ7Y2QFZ>|TeX*pPRT>8P(3RX(Q>B}AL`(iVQ(d|vN)%f= zEj$gaPS@!)2wjsL@SJ3N%Fb3jznRihtw_rO1iWie@W(4bIM$)4j>H16bTC_TT2K!J zPZu(}#6eDpPu)E}ThJ|I3fctunUjy%CgCm-6O$*+E$*Mm$+9T}y0YmUfJg_^y>H=% z$Q__v6fZb^-7ECRU#KE+YvXIsADJ%|H&R#a5xui%ceT7R5OlB)QL~BGTHZ5MTuj@# zTm$U@+X~LS_C@O!udK?tz>Bu#?!WrQ8)u$AmO5c9bT?!Z(enqZM7FRo!!0Oe`ajh_ zV`GL(cy(65>X_$=FyO=iQ~1tjZ`X{R!nxA z(BtTNY_A|Hlfh0wLgR1k4s`zC*azru@ArdHv?tN;@dEiC%76!=vpwi+-bQ0}L^>Mn zXL`P76pCYuNKLx*#HZ8f{_X%3uC0tvVY-V(Ea6#>1MWj0cd~CdckS2}oiD}>#;$S! 
z2l#QQoY!1CyD>-jn2-PNm{tnu5SzaOk$DHJgUOAkvOzU!U4zvN+4B2nhYWFYuvG?E ztG4^zvj5TQH%!Ue895&}vdK7z+*VShqRffKat0uet`vucZTSQGcrqBKbN`iWr7Vi2 za#pcDT@2Epq4|m={N8A!;=|>PjCIT8w25vLz;tt_qXVkcvDuuP4QvBDOT}=;Aev?J zT5Ebjkte!S8pEAhp<0VsSwnI9b2O5OM~~qL!DHv@| zKb>-Uu~H2c%hRP)p=`^x#0!r@6N>9iqFWX(=PJYHv>>f&2hinrCxHpQ)WRk5O> z3J$VZU84gPt@(6*+yTQk4S@}LaH0^C6}rA<*s;<8nQ(1a)yXPPt@u6yclxWt7uX78 z6dKdPi{|>OMRbvt=P9C-MM0G%70@>Opg$r)Fyq&6@q2+^hggl_AmtHVr~H3BlJit% zQC2^tSV^zjFB^Ayu0 zR${h|2b2&;X#`yr> z3<*#w@ijd|hH_fKkL20!%jkae&v@}kd#J2gbzENALj|LiO;j-0@lo^*{IvdaxdG~- zbCUf&svVh_0<5>>sXkI4atjHg9@&7{y92!r-GD-71A|$=G@iyIzYy(nMEyah zB$ilVd=azN>$Q?p3>49gnO+(gZ3&zTS%C|=0OmP-x|t+=A@Cjx$|3 ziXn3EwYbHeyU1o+4xT*Hk6N9lKAkNbkG5H&Y7|fn>XukRr4=pA?Za4>9F?JQvgpQ5 zk1i9FR8IF}LsZ@%=r`E9NUI=VV~N1>0R`!-=`qObqjvjIyCHqqxWJP6nB%)?T7RKy zOUgme94*tG8|3_QRxnsh{0+p!Z-X3{*-jIKSV(6y(V37G1=9NAbfZ6esTmc#*X|%S z;A?e?4Nk1tU@EeSLDEG)bc7;HmjgPmuW^8(GOA$D>rn^d_FMO8&5vK(&6N840EUkgaV?|^tYjP|U= zOFGg^vk+GGH(8^Z1LwO@4JF#Yj{`?8t@)O)4y`zPi2HBX>IpsY9G-Jr3uKpw(<}lj zg?z$t~cr`iSBBbZmrf@fO({=%^3DZxyN18Bcfm6jhLxBUZC&8`@oebi98Tm`H!m>brs$TN*~#rVH%K}$ znj%H?6Y^kxKR2p}N$Oqt<@9J#C!6BNO6l(x%O0e!SW(ipkS`Y{q8N}+l;uKP4_!i+ zYsGW*F$-{&X(%){93ml(lW+9*qC-sAX*C73r*Ff3WdAM2i|GJM%zPoM+9A=4#Vq-T zYSW=L_%N4M`J@R6zcV(Rf%~3D(%>sU4Zc6y_a0s&x<9SkSV&%}Sg^#sxH7-H6cs2s z-Og8(Fqc)Ngr=x6~d5$FgW z1u0gLk0%qRtdfg`0$pm3$0ie|1bk9uRV7wF<)lqHTCa*OB#Y>?q7YaCKzuHb^&6|;$dTsb2bO{deTVm2GS zz`f5Wvv`CY2V9YSDzfOik7{>~M>xMb=9g0D%xG<9GV2BDbiz-QJBR0-bSYk+ZjT0G z-t!6t+TWeMQS`Ikk0I}pP;}Rz;}7CIEz@-AFwZzCE#kARf|Vah50A$+-KdxCWM~p0 zOH0T@*;F}UaKwkuZYEsS0>_<-__>}GaE*Kcv}>SW@xFegLBDAIP>%E~D(yE{ivS_p z{EmXwcPZI?MiD{5G`l1lDFf2rNWD~5(n(>UU-1d^1nnYx)P?~4fJ@=V8eFDEk+D>W zW~mjxAauLzKCzEiovKUTiMihxVk#?XQYstyC8hUA@_zDKh_X*(mA=8M{lQLbi_n`> z00AztO2--fvuK6llNR)^ls!reO>|GF_t8pCzhFFAX79U{DRlN34dyLofcUGNNbI2? 
zw>wEnF<=5wT1MRb+1JrQ7QV_wUKU zCyFhL3a;iv!X=uC#AK?Ge|=+1r8wQ4DLNxGUYC0hlTq>kh?C3cWL75{)Z4|rubGCc zvW$CM`)Bc!Xaew!#!LSi?ew?&AJPGW4mn z6JzTxu5Seqi@Yo=F-v1EIG@d}J8yYpM+Ra5-Vr#}cx6jio+wNY`Ntnrwax{dLYr070~xShF59kTV<8AG^#q$ir46jjb@&>I|SYI z4*1hvJK{}Q#Ht~!z6wP9+RbOA++22i|MT`- z@~V0|k<26j&Ela@=!7puJeO28QT3Q8bq!l`edcL3}3Lhr)-)+TkJbCy5_eZidjM|G^t*(ZAw6m zaFN!%D1$rQsVd?0jFp{9;@M_l(WTop<{Yyb*0uzxJu_3w>526f)1m9dI>A^xh|3zU z6h_z3n3lcmPhb|S|)$|UB$ReaCQIqJ*5fP zmLr@U_P&kZhyR2|!9KSj6hyNnPq0YlwwXl+kst=eh8oi~YL9-+9T+Vuu>Ybw*_fwZ|}fYzz>*7W)0hJB5wuHnnygFRKkB-aOKS{Li>%^ zI|MG*aPV40%WvKx1%(#bT2~z-H4_UYOR>yCgwWY!Ol`uco=!wh*RDKxBDPYwHX2*U zS2z_Uv3~PuGZQn$%AqV?e>4k*u`I==Z5%fv82>9QOXcaT9#plYf`y3_^G<5$@#jCi z8i^mxWT@B-2SoFW!Rqnx%cp~tV|CIc1TG(5!!fv^Q`t#wy$k*g{yy_b>cKSX*aVNq z>J1(zGEhqnU2wZUsB-aCcWfLV`|RXX-k^tA0>zMOcztJ_IFWMZ$E}!_W{JScU+N6k~HKd@Yq` z3n5F_VZ2J}nLMi}#CSSBaIbe$xWO5+Kqs0wB=Mw(Q3 zs6@orT(K})vf_Hg5_w)9qITZ~uYhmEFEaNtpJG;;dsTqu!= zPMIhzI(42a=6y<&;sTX^*H@XNgk8dSA z*f43>CdC32jk9!82XX)@JTIUaGDXUD0RfIrSpls9F_c6#q$uRPRqkupPVnG2oYlNl zA-!V=f3qCY1j5PB0+GydN;Wnw8ia%(ir2WzY9>+(3LG}Xq9KY{F0iYjuzFgDtUh8{ zMH8PRD*-N4(L^5%cdCdp^l_Qt>mz~k@_@ozfH zobfYR5k!SRR0XLsz(JD@O+Gl^97^gg)B|3GL&rOj`C{@PEiRtkO z{jPi%e^Yomc6npt{KbtsFZ`-#;2#O7Vg%n@D0h(!8@2FAKefQZoqihc71j9sZne{PLA2YwHHi&>zyi*!ws?m&_W zJM7g|MjE#k8A-(Iumq{a&Npm1fy$<(0^X$!Y$U%=I`fY1=s5a;ykI4r!;TVGCRpCU zQ94jeWaP|1!qlT$GQneMRl@jNQpwq*9#I7J+4QkgAZ3_YLkvif%#n-|Q>IuR%XL^Z zhfFhN>Tx}p!f5h_9LdaOQj2L>7X$R}Wxfpa@EdqXL)d2~@`mazPkG4C9Jw^6GnU=d zDjF^{a{Up~PDmTtBEF~OTWD{HrMPx=?#Sikp~C8wBXd_)3umw0MMiHfZ&X&t62Rsm z5?M(Aii9*FT4Yf^tqv&&VY4gcV#iKIN|Q;aiokC#Z5|)WuWTM$+*~Wi zJ9hQ4_JCUZ_0u>J?#(Q z66+QY-E_?aaUw_{>k^rG-3b$H3)AENAb(nnISBMRA=Q>uGb523Ei8$NR0`i9@BR92;z-Urlhah@?`1y7 zj5VFgMw8@k_>#ZjOa8{bPo`D!*|u4#eV)ZG@xN_682}*UM6K8Tr@f%QOZLAv8H>fz z@ud5S-k~nOPhfm6GLKT3zrDWATzUzkcss@Ovj6mu_q0D`ORW3D#m0D8qcfA_)ectH zpA^awt85aE9bJT~#AnMDowTdS3U~b=q#M2oZC#0qydcX$fEDDBlhLpbz-gKSZr0lH zp*H0DC5-zW%ry`BhFlYt|8p2+m*s!Mf4by7?HBo$vPz3_Z?2;35BQ%4lI3W$m^2nf z!^NbTZ^c@(&5&hTW_T~4v+;1z37E-(Rh!l0h4+5aD(wGmC7fU=m13IY{BCMTK2EL3 zMU3==O|nj`$VK0ZT=cET#eE~b>;CMWvEL{1yG)AEUj<+<2eHe_{CW2(0?FsqJbv7B zeimlAz88jEwo@*CaO&J>yX^gde7f%~cFFcbm)jQmZzIeJ>V?nOn;C8gVz%Aof(<}* zn`rM{^}~P*Uhd>QZSU_syuW+s{_few{ydKebn%D=*z&v>Pg3^-r8MH~1j0?ujuG>n zoqjNY)rK#lr`mfmZvCBLahmeWuhOm&!k@B_6L0T&41mEMZM=$p_6z=br`OOp=a1sO zou=o!wD(;#3c!ttBWeWmHuVAq*xOxVC2G}7o-{_0CVqOw0~HCfiu&5)m#`^N;v$Ew zB@zwRBsaJsvbv#q?^>vAy~q3z>u%1gyOTFa^vMj&w(9C}*`*2|_Nan~tL-(i7lSbT zZYOPoTR+6>eH-gyj#;PcUGIOr{pem}3)PD1vK=dZYFMv5v75cM3+va`Kj8hn%dX)4 zjd*il{562iER+l}tqc z@-wSf=9ACe)!qt$)#so6^yf}Ia4h}&Up8~&Ry(@_{#qZTYh9viB{idTueJJJGz~Jn zCZp47-OtIHTv|a9MU?|(B{ZY~(me;HcJPLf%%){6E@@Rcl9MT=b5O)) z#!-2MV>HSyjPG#5FI@K6f62T6sXf>`c9L)1NU7~@-HCm;&CT42k@)e}5Z=f71`{V4 zc&5S5L+l{o!F40-2`|E?W`pf=1Yn(XB?bxiqAuY37NtvNoscm~f6)Pd_^(_j8j}$x zi=rVYAt$Rz0;_7u&8zKkXXyK>-hnwBj}dHFDYB&Fv|9Jx!qXYqncf!Q#r81Y(Wg>( zK)zRV6m+?H*+=kx9;F^1=`p>d?!H6%GXA($A3YGJ-Wh-Vb)1$3)5NdJPd7?h)WOQ{ zna(#V#*#^TlRNs(n;c-b$-Bx`6czTx`&;9~@i_S{hQlF^T=QA?nwQBnukfUIcLwA6 zHhTp}>#h$&Di%w1mx*_=eFVfMT~#E25HAO?ssT-RuFuV$8I4h5UcOiqSe8=` z%k2%~tS(M`VhIE-N8>mMN|z3Q`P_4}`9x;+>_}|V`PSH(w20Jf>gIoDren)z;TO8a zOzs}<$9vrH_c*?XJ$5sk{qFZ@FGD)u&e>{L150feejCg#&^4Dq1s%2iMz29op)p}F!RiL%Jy@rzRmi2 z%Jm2v#(UQgV^7vsk~gND`>s0EA0=&E1GB+8q9fu6QP2X_a!!;2ng~@fSPcWaD7WW{ zjp)D9gC`bDum-+X6irLVO{kfcAV^5G6(z*EIY^AlX^g`OtnGxQC0}===XDRArG2B* zUI7NhCM7=IMiutTo(o=Vby}dyrHlQ;73M?qtuv(Ra0!20tv5635>7goRoAUOEO}3( z`()Lf>gx*KGxP2@TdvUU+FH{UN}}PtQ}liehso~!iNj=ke=3bI*}p$om;V&LMfp#k zqVnc4pMI0sth?-|n=Y2Ol>M|#5B|XtqAG1V`fmwID!kEQOu)zbpD5PP(Ei;`jPuj= 
zoout4REi&epO9xe&meeWmim?LIP#WYue2ESo$`N!^6e4%C-3keo!Sh>-j)5D_vEr&jChN+Tg1OwG8hc=SZ_q}dLCf%S%o zc%k8BjX^4IIX$HT=S(l|M5Mlu2bs!EJ67@5b15+AFiMHYS= z|G!kS_)t`{odCeN%coF0lU0GGC{6~)yHGZQ;AJtY;CR8zWX3RHW?|t_ej*z{63M3p zfhnT|MqHFM!Hz~Hd=9ncllVH84O0;D8KIkhjYRzGO_u{^GiD{{DYosKlr}LHsQi+H)wOP^8M1`8n6p-{m5|T$hc#Z zxt&Pj)(H4ebnx z*^$~iN>X>HX+MBGw~!kl*sP(IM!y#yQf(;AjWc9 zR9QabEPe3Q;zcJF5?EHz1EQGJ0~)xyRF?7Hr2wKc1FOe4u+rYEfP^QQ`Fo6o&5Za} zn~vPxZLG%U2~CXRb9{%}#bzDYz5&}Qykg+i|KfGMMb|Y&HJz+$vhK1-7x79X4WDxN zD!YV1EsW%Lk2t3D!n zzKhrOGR3sE^Q`G47Rfp~BwsR38IO01@@KMRr1%`qh84Ul{^2$r@%N>WHJba~=!m^R zRFJc?;Py;X1$+mm7P>su@axPpbBXqrxGdGKMxOQ7HW*uVskg5*2%+`)K0Tp9d9VMm z*X%u+$NLF*9iJEB#h&%&9dh*^%-C(T6ujPzEc^3ql(sX0Hh80>+P19uDAind%_Q$h z7#|UnbNeg1?x3>Aq^r4ApAnX?4BViFC-)cHPP$s^WIO zS=qa+|MV_>-+2}@{(KgdpT-!bI~DFOx-mQx+NJs6_51EWv_74>es%cNbn50?Ml=(w zj_UZoRyL{^b{m5KzVV6F;+3P%K9Me1$A`73Mm{w~h6Fm9BT)N0?ZH zgm5|ITq(My3RSCdAJRgq4^)9Dy zSOv&IIQEf1eIlqxycE!blA=H}n>1~m){K7&PQo{tHD;4)2g!qz_plTwUqYdg?LH9x z@tH0h5(9IS2W+34e6lP5#6FqJ=en_1kat$S3tqu8J;|J=5}mAjwY9$3zey3T!57Z9 zF_V?c)W)3Sn})xVMAK3yECVIz@fca9t?sv+9hEJ z+;?kwb;sGey%14P&I>vykvs<34RzbXuD+suaf_o;=epbI-Qi=d(*}6HRd+v9xpFKxC$=R-M0@BuJN-i zNbV8Dw)%2V4CL{4inMQ5@-OV_pMy+rYYRMr@8EgnUivm*HxJUxR_?NtA2%B;^nRq{$(EnrC~C=R6{3!T|mbulQa z5NYK794h2ko+Pv&joW{=^?%@}@Yk3Wj*=T}di2e_ZVkp$T9cN-T*yKms}Xh_t3V*{ zav26;`+IkP_*lF$VULZEg%6#s&H*hH=OsB;9*(Bw>P{d%nn|P-TYL)0kqZg<|D9b6 zcpTMrzW2^^cOJVlJF~Oew|2D;t+WrlW!aV=vWy?{Tb2#BF|sYNjV*yJ1K~{|#1J5a z0x2o4Q1T*y5DE#UltP-&gaQFVfDn?V2@OfgM_o!&`hCIbxp!t|8ym1IX|L|i&d%J& zx##@n%(>^>R#|uMz*#retY|D`-^kgETK?lhP4~6HBZ|qsqodhZIRKD0WP$gMQ5NMo`WoEOxRmN{ay{Eb+?mclh39R7Pjr5h&r+>GsoZJ^vcSPyauSiXH^S*$pqx`kAZs=>89{^G=J$(cn1(hG zWYT@t#yLB3*?d!DdbrkoYBn6zd5#Ok6-g82pe(}5)3l)N^U0D%LJdMR7EtY|4wFqp z8YfCn4h>c3HU-8u^;U?X+O|C(;pzXH{s5l~^WV<=4Cw6B_~|rmToFb$ih`)|sC-Da^E#e?`7(uXSW2B?rRbC8i?&(lmi z%y%eFLQ{aF;mdeoL1(xn3wS1-FIlxNN#GSZs1JO0k*x5-@;MFd%~4L)WWP$9|!_C zif(VaDdT>W(?a_e^E_VA6>iNqM2t7J*zI#b?gmT+4_vbd8nJ9nw5=E=LEWc~$O%7f zPSn7>6-x!1TIGyC;I&5gHK@JiP(EoB;6t=6)BipFpKR~u2%9(RskIp9O@wh<%;v4v ztHFw9F~g*Onw21cb$6!1-tuO|IJ1|NF6un(3ruT|R|m+^<2{*kw}e))UBP&s;1_Bu zLp|qr6JO*dj7z7Nppu)P3l7iA42I~Q&cMQ!u8oVzKE?47PSAkY`fLempr09YOQT)h z&QQ?GWigZ|Yq2me1P~?F!r7{$JFvXdE!6Hlv^1=^`#YWP4f8OE!$nahNi{?jSysn8 z=a%go z7~4;zNGzdg(6ofEgX(OBd@^tdRnp_KXbO5vARyWr8X`VH@yiDJhTK%^jzkJRH6DXHbg`c%MVx*~)%yQiz73VxA4Qhr~^o-8wvLL!_AH>#Z^&vx~e-}0nh2Cpx|_tW7&X$09ppJ zsW1(cM6m%OXNe&zD7egx6KQ-Nl}26ybfd~&#zc?c z-{Nync_uowQ+4;UvFA^e4mb)4-8IfESUv5`xSe6*oW--x&JWmrInk6i3v;40n{X%> zw88@C*IN`ZoNjikTrO)CEm_x9K@C3&sIhl2r;@ZoqlL#y*+58Qn-*( zsr)`beu=N=PC3b#tp|?dIm(4TyoYpzbmKMT?vuRs$Z>Q6vghb)WAqd9EWUx;iXum& zqYrNb`2F$S`{BzU>93E`=gH6T?cCb`e=OeLpV;#QPW`0k0T@k@61K=|jHA@^2fHZn zX#M_c$13dPwd1Pb1Z^Q1oC98<1CFRigJl^SKhno%%kjK&$Y+jqpkbIWI{i5QA^8R9 zrI#@YLOq~|f!pjwSv1Tf$R%Elj;0q-!Q(Mv)@3(cI?LS9Qr{6%-zsz1CE81`-qY?d z9~=-u4psA+YX=2_E}x=*Fg?tx-uu(>^TblkWFX$t8UtpN&Tzaa0c!|4F%8=t2`y6A z0oswsylA#ch^_=1%~XGTIvh}fEoTzFh%BMpObH))0upu{`P5RQA*eS?Olz7Cc(5> z3`!PhLaZy3w98Bm)A)ZLRDri0Kzc5bxe!J4AT^^@%XR~LhDE$3H7Ch~4k+f5r^J;^ zvDF4iM#4l1L^>LK4h-SO0!_%MXEeYlQb3jnFSa!-E;qI}g?XQ(gcZ@z{SjYNo(E`M zJhec4coipcW`_k5%`f$~0D%&o_s4ve?&AeP*XNo8Wq>iERtrR#-7LbW>MiD2UnToswz>3%Uk@^{=kbg za+;`Sa!oo7UD<4lAggJPCycN2Lz`7jmM;TIPS#N{;1@tEDq2~e=955K1CXYJY=_~- zg)Q5+S2*6u2?CZQ@uaqJywxt4jc|iPR1m*TLwP`7Gt(F2SIHW*i|yE>+$di?9AR>l z`GC+(@Ge7vc4zXmQ`Eb!o2CF}j$(AfL48=_%*g6m6&WeeEtc8(0XN{VJ4iq?o|5a~ zpKb*}3V~tZ{g-4HX9OPsm?|olm>e)1o+wejB5~9xc?%#Kj^@@O!39>@U~hR0@3A6k zY~*P2$(-W{whq;OG!>QA807fD3h~*=8EFLmb$vQ3Gi~o^)8gNX1DBTiKiv@ z+5h|$tM+fZ?B>YQ)39dk8hRDVxAdf@hN=U@@Y#-+^7+-t@j~w0=4`lZ2hPgpseQPN 
zHj?M?t*8Yw%Vlg_8@zEfP=hzFd9UV1<+0g#F$6=&cP5y&>l4Pny43H*F816kh5EG9 zFN8@fx1bisT(aDotPO$s5|E;U<)(p-RHl1DvSUM7OKG9@7yWU?o||p$Qk{$)O=Hwl zEaXBlP(zg#e8A3kr#X@Hrz^=orA3uP5{@VB;_Q66KNsw*2C_a~eM-)m>1-_ON%LMxLn8?S>4ZL!8Do+xi-U; z@P+vt2EGMaLb92MmCX?r6A9WT;}>Dv-S{GIR%6#Y$kUmRHA3`wx{?#-j&329!@uFh zNXSU=(0wy3GlPVd8b}JVuQBQr0Cz~{9Up2iv@{UJe=jHeMnH3Da%?SFw@x^14nR~# zjaWt!Kr`kJy>fIfcpxsCpN{t91=NK47^0wjfkut)X8m~#uw6G2Ty)pz=`x4y!gOsG zvK}*}T`O=O9{fTk-vw(eBn1?{;6xjFL6I&JgEZ(qFSs4vloA5L;%G)PBoYck&q^kM zUK!sg$NWJ~kolxxMMOjO>Cz^erOYSFS-+vHHn-@|j|+J^Ilu|MZgE}?$;9;gWG@i_ zC()dlIZsB=_^GsqHe1(1Jr=f?XT``G)O5xCkPZtC3LVh-s&qC?1`yI+hyB zwx@CXp*KqXjsP;%j^e8b5k<}i&=iOe8wpkMlS9uYoDhI9mD+Qjo37wt@|L$Vl}1!% z@zCjd_k0TK?M`w`nPEFHK$`bsmJ_X=g@mQTUzBqxBXW+crr3stP1b;Ju)-qTU{2Na!fk7ymwt1nWz4Pd;3y!;WfubRA z;{sf20nE^>4782Tx<+`*P%V*@L?hYQ70DI?Ce@AgMqL1PT)-C)PWD?dRpI?M49YqX z(mbcwn5&fHMw8Jk^9rX2h@@gr-g#asm6Q31ABZU)vS4gDDJVQH=>7uX!cjX&WEu&M z^?xZ~m(PIw4bSMfK6k$W>kTOE(veXR%S&eUFI>`h*~3Z4PR1MTB>v;-HOtnmU2%JV zwK})2t&Q>rv=={zuLQXgK!WxHrT75tSVClqYX;0}FL(eahi>$a*2 z1pla^pu4=Lj4+u1eWk?qQMuQlGWsz@AGrCMd^Wuri`_ii4La}PF3Ed@m@Rl)7+hZ= zsVo@Arsg@Hy`09x=r5fq8Zxo%mxWpVZmf)XojfiTnlC!-{%_N`7hA35Ab*ixw&HKUM`y&_RO9R_KcwgizI->`eRQ}8@j{S zWLITzAQ5;PLo1hBIji4F$+IRpcmr@J7pvkwtiFFWwcuTfg-d-toN2h_&}+9yq#-!< z=gZw}b_v&p07WO<--H&VJUNaS-6Ev0(P{K*wYCllLZMk85S_5;gvBh*7ex95OcKK# zjWyc9ouQN^3xzE;UoqO>J6`=4s4Hpue!NG&4Ezg)XTH{{OE<#;R= z$q!f3e0j7M$Qj|EwX7*=`hBJ0lBT(|wVSNM1LP(cE2{6*bIcqWy)D$|$=mD^53Zf> zrsqXy{^Ot#RGnjRCsDWVC-y(KZQHhO+qONiZQJ%Fnb@|IiEW>p_ujhao=@GqSM~m| zYIjxdy`J^_CN*DDHz$J~K&=ewwF(DSbs|jRZWbQx-MgvB4C#|-?M=mlh z#kGfT&Ra|gXn2!aN6tkthk{A5;Au!Sg~jfWA(4m4i9Swnz^vKLq}o zkp!-Ha#IN|3iD3xmY~t4dpu=`;x&~Fh}q{5DA zs6&p899EUR%Rt1YXZr8_naM(-YA(a3yS!GASXlYmAk*1wasf7usoAfvuBc0P|L^dn zbdbig#fQhUxFia3Dg|)jsw)!l?>>8$V|>1OllO$HgqacP(qA-8L1U89M4uUPSAVf@ zvrNc2|2{gIJGD7o-+F$*evy3veZdhD>Ck-WZIXRyt<(7Gqa9%%KKo+s;O*4(27igC zq>RHamUqhfO89DHFJH~+tsK1ZzFh-fgVIC#QNNfQx82rutaXokD4&>}t9dtk8nk14 zP2AkewKdIiB1npp5zDiwVA;ip71j-L86Py19Z(-YAbA;y??3%5ICQwXQej|C@uKXh zZm76jn{1pc%1CQcd+NMsk|ma1Ve84-$mpVfmisyjQaz!mrps1-QJbUE+n`~ixAbdk zZgjCc?VaDCy`-hqrnBp)J9=F9u=YQ%8}}ZEpk`Oat3s=SUv_JqvhT{j+;Z#r40;O2 zb$7qIigfFl_Z6OTn1SiJ_s+{*;ye%JYq;+2JvLu4!_RK=m|i*l@Vpw`B!72*>f^g^ z_MDm4XZa2VBZfH&m50UASnNCb?1whE$?H`MN{?Dd(=r@XDxs(tOZMmxQidw4hbK7t=_AM4^!#KpvF zu{Ye5uHWUDjZ%%4}1}89T^+qRDYV{_ksfQMY+MF-AoYdMD!K z9)}(A{A_sl)6lMkxBtA4s8fc1C3haR##vTREUnRzoHC)&k)6Z*cO11^XKY-xT4!pU zwOWV!Q<-9=;f;ZMrO7EI1zV#7QR)qO=o8ZWyl?`SU9IwFx&~Q538tG1V$>@_{dg~% zS5NnsiFcqnx}baKM3poH^s}ccCgw?+-WJ58dv_hjDF4BdCkkPyI{K`X(%l&mbP`N=XUH@OyN4^L>8Q8fRydQ+ zY85rb%%hhpYD)V;Pbc^Yybm#Ni*t3Ot&NYk*Tco!L!qv>yR7V$6nykmu>bKDzI5A) z6>rVy{S1E}F!}ub?jftU*z@IvtDD!}-&|blk?Hk5bC>z|{mJLIdGXxE5jy9?goK7M z-!8J@1>2NXt&NMN^Z6DnZ7{ticR3l`JPdw#zH(JAub#SD`^CN!D8r*3hy-8Dby~E_u&aI?edg@*rmA{r2!y zp>MAjZTq?ne!S1))&XLtZO{GgGhxo?g-{%gAT^;g(?QmA^5pz3BgWS&5IY~`)Q`Ax znQ>g^ptX!+v2p?%^W|__E8sg8!cBsyb-~1}Pqq7>d2qZL0cbYn$9c!TMc{RS&hfZT z=}$cT-e>6cV?gVp{%fLCc;J^%$ZkP! 
zz8hYRZ>nlnT$V%v2O4CK6fj6$*i~v^EHf0AHx8+Ml1fn7iP(tF>M94=k$xJei~ZU4#nXIE4_d}K}NgEW`*+=ebGLD zeIoDze>`WMSh~PZ4nCf+J~%&scLKfK)Xnx4_6f2V(&t0WZJnq-m_FdTz;yx`j#^Im zX9XWPAB-R589JXq;Q0QOABmq}{t);yA8@?zK0!~8z#fRI^YaSri`eHTPRtqT>+=1@ z{loo6PVC{=;fngu|Lx+4y&exlXG1hK;2-`in-x7Vav|&l{lxA>(T%MYNGoWYGd`i6 z1^x>7`t29&7o}8==uF@~9q`J1cTDQS`bP8y`^NId*NKEHkSESw&?mrNq&=%&ura4T zhkGLDLO+^UXMp!fi#AI?TQ}F2zfu32l*TyEZ+i8xHUf8JWVmdYY{cy*_??_QMAX>3 zRBWiHXoH-5xFsUzePjkJXS2_J=H!dgc1iJi8UNAlJK=Z)FLeyjCWRXJw!8th3 z%@mu@+a?jud1mki)9~BWgLiNmR1>_X1w*cYBGd>aQh|c6;3UKdIZ}p#uAn@`2q{v6 z0AfYOZhQP?iNE$NN%`Qj| z1+pvC3=`RWQg;(RGBTdcJ}%alWNc&jSAPY>l9N+lMWDh&(`MsPVwn#@_nJXRFM}ex zH&sC~L{o|NZP;|yN~5GVP9-I?Yhb|m#n}Y^)OnUqIHow$=j8zMYk^Y6(jQOea^1+0 zut`lZWKYT@PunR%y=M)xi8uQ?4Syk^uMl|5!>mkDp-dA<1GJb}OIV9;r-fc>_7KR~ z>@?r9yjC;gDGEmcTUyTiK>w3&#F9a-wqNqmvp;Dz|4BLe&s9xS zNs~ZQRzY4#MNm$KKu%so^gpSW@3$IIK2vVgkm_5jBV~o1( zx!D%Gz3wV_0$I=DotnOwHY=KB7{q75FnH|X@-h7E-N79{YW?e`UYidzF$M0spN`fy z&@y6efmEsqk)QyF%VKzPG~?fd_&IUM>?t|2vF@B*t}zeat2{Q~?X;r~*_|C=ao?_yTn z(NEi~^Ul6hlRS*^nh(J-RSN^D6Yn|HfH z(Y@r&;vmku#s)~FCLBSrX`aN??k0bK+Z#aL-97Q@eh2mGyn?4nk@Xv*dQ++Ru{1$n%6*Xf2qSnu+ zrNRqwS_IM38zPv)*otmVglbwP4?xSU8 zW_k7096g_1TnM52z2&TU;m@Yc!m&7@YbiRE@#@*&Y&M<2Yks?o+t6e_lL57`Py9+s z`RZv}Z}c?z0tVj+c>b7iG-8f2PBM~?OjKHMN{VbWzF+tzUt#_hA6vh)Uj94Gmy(&6 zrUHDLMvht{%d4cMRBy3d{hhs`F!*f~?jo#9BN$6yIVwYf$#);GqLCP4GKdmYEyk3f zlP(oc^b#G~C@&O=e+ZqRw91F3HE83(A1o8@@jAyVVyj zwM3{JMQIgq3;P?cmEz4_<(qs;wEp>quD7D1kFRi?iMg(1?Nc z2GC&greMDZ(t+ii313tSTTkOXVZ<)=FzNR%db7^_#^!H8K!dNbx|rojG?2xj!OuQ7 z*zh6w{I;qw|I%;PgjsQd>o(r|L@8$Gh$2}v^7uLuW?6`@a7ZP+p$o8Ph1vej+Wgu< z{BFAiuR|Q9Jez~qHX3CLzhjIXjXwqww*6Xq`hLeM&?wb_<{gnZAx-{~PXU8p`m;2< zvfeQ~$Qb9Be1pF+z%cF$L+A1vDE@<11WlND+aWU0L?g? z0KXSb6XfJuGZUD_LR*JWHv)PB?t<1!u_pq0LhppG(W-cVfFl7B5g`ys2#b*YW<)bF z?(DzLc)VhCPJsJ}u>}5nT(*E5QCa-LDCZ%p6B=injz}#*TKvSQ_b9}u-=XfItp#ph zFm?=B10fkj3(}#mNYqJuTzp)De7t;u$RV!8{vlPoumoBoikCL?CX}9tRy@ckwL<0CEU?lZUjl&DNa0xH zZ^SAh`o0*?Z}eM2zxJ6$0(-(13Euet7DBIbf6>vlQFjG=>4GhvxDO~R@drqs-$_Rb zK{G_b?^2ubC}_fw5%KuAhYhH;V~QN#_YT8(1pLH(6#ppHsN5(LQ9>i3MSpBuUAfPa z5LXI|3RC9d#1`h7ijKwXW4FXDEh&){{ED`cv&4WHV!YR|3H0Y{4UMq643>! zp!R#^4*Iy`yn~e)rSA(rT@Va2kh!lqPqXf`c^hI1S^BzT@v+1F>tBj^_~p*v>-_Eg z^6|Cu<74x_mN=99X7gbl_x&|{t^a`%`*~`er}z3WS=K4vg0DE*2%EZ+>B*B%>yyKB+RPN<#2`sl@ZSBOUL* zc3sa3*jVVZG2Q{`p$o+N{3!|4<=gqgyuM&qJ##CpED+Y|3;9k3uUmz$lf$khqG=^z zuO}f}6@CYj>4h(=au>(A&W>Ynw3QO(*n~8)3azyYLb(f9od2*Llz$WJVbFLI-9aKTQW2Gw|F#r4Y&dyryw1z|<}^J#Ft+3Wcv+2RY>B)6 ze%BxYc?#1*4&A;(JRuwruET)o*;P5YMicUH#eA#bodt(@E(dO}6>A?y(pg~0ww#G) z?S(vg;z}M8SK!+r@(*?XWhtcui*TtTs?=2P*heG$7wAL2*h7Z(+;C>$1({T&&TiNwlm<+s?RSXIa- zW*fDR>`Fn!OT}JEQpq%qGbGwk7^DcNc&R8}Bqy2^)rs|}PmIERFXil{W~;BPmF#bE zb>Yb{6;26MYHpzQYhj=`C@8zR`y;V=V7@qevio7OdlJc3PRsFDK~PLlOG#2|4nZ$I zBSSqgRWUiaiIkm|7%EIBgHn>SYQae=(do|B(i78isHCGqI8ez_@85-|vl)Egnsb=o z0E34a!5*0GUs5?q$UF_ z8Xy>T(BOzcgB4Z{KA3gT!1gO)o`<7uelxpxK9RO>`r^a2^nsr#Soat(<8sBI?i}u! 
z%{{rfX*hfgxaV+BAL$t6na2I+c*PL+e?kYFhIs!kB#q23L+_V9`vK8m=yf~zupfRp z1iui4UyA;palH?Qo|pd}V7E^n=-zvs!Y1RkPv7n~^tul_ox(0e1l}shaD$6R}BG)3BQa!GgQeLTAEv59Tno~JZ$zA!Zs$cb0`c`(9HdiDbhZz%> z%jMO7f?C8LyUpfxd=gv~7d?%s%g|}n?AUDF?CfIUa(+@&G!xy+@HT#uW+!H5EfTDJ z&Rn3^cL+V(U%OCn@K;lPPSC4Mi3K~LGw*#04s?&by)x&1qROCUcPTrtMIm}?Aj>nk zO5&e#L^H<9Gx7qz$O=w3?o zHz!_Gx!^jjT@k&*2W@9G)D!1Q7?2XR#eR#a_UR=p&jb$lN1Akx96u-?U$E+{U&IPy z^l#nmY4`PZQWPsNWQB=))o5DZKCexkrf*YJH;dXy45%-=P(QW^3#MKx@p9K5{ykOG zG=;mjj!lxt9!K6&ayBq#iRiLJDp_O15S5KvrS%=Lf~0vP#|7uwgS30K0;1+#%7|9C z?U&y5wDzd3`DZ-y((kXe=XH1Z{$C|GpW)67QmU94&m^UZAbqA~NtLMnv4`^gDl_pZ z8M&;*6WrVEG8#3TllEJ|J3kVqIQ+-Z;!ZRipCX>Y6)oXpsV2K%w+%V3jna?PM)0%} z{5&uydfoCW>wG0f75&aOD}KT_t#3jNI5)qSxfMOFfXIJ@L4LW<3y(HAHDOTeK5TIT zk$`9k;-&LZXs9LHovd6D?P|8F7{Lq!yOc*;w)CyCt*?zD={<1W%ot( z1pzSM-g*QfUYmeN<6(IuVWDweln zbP}@#pv0q_@p`tuN;$anJ*z9nZqkVq1#q4@CCOTc82w_&$J#ZjE~awR&`T@3LMDZ! zKi|LjMv@=-D9|xt1hb zGqTip0DE9!k#5*1{^9e|S?*n^{~ z)0Ce5y8>R+jEl9^VE^5lkN0IeWWk-j+xMuM<(tvPXB~VNs7<+s5T3 zm+yy>E&ajDE7cZ-c|+z@bFGGU;RG}5VIGCGaF1G565#{tkr&KXqMNs?HBcp6pk#t> zCRbNQZ!%LmZ{IHl4%UFkvCRzq* z@%4ad9a9|rZWN_k6DPZLT&9$Cx)V2beaP0hYL=sih(|Y|Upmjlm7VP= zjJtazyG+5n&*L93U|Z?QnUE|#LiCOWJhh^}te|_a|6rNHI~zJQgliw4QTwRe(`hpn zla1N>h_-OrJn@3U`;~S?cmGJPe@*6{R9?VJY$!nCV1#qRa05WZs)=xS;{In9{@3<5 zw|)=GZcllha~E8CH#J$$>>gFA>QX)vldf|Yp+6<<3N^K8&xXWSb&8K)CjXFrXKXO0 z6hq@-Z8cR528vNdi%49Cb#p{iPcodViPsC~JUr52;0ZBulorC2>hmS3YTqK-)9Bss*&d*EE|lN(T9fH*O-2;)hxK z^10Z;6^`#!p;gg}3W3KU=UF6;pk>jasY#~V`H<}Oll6RQ=AQWf##(>y_5*>P6s0r_p6cK zPz(v=6ot0?vYuONcF-F0Kd84RDacT&tD8Zm7R>=Q`WySxb2kDAhzU6odu6d$bxI?>_4e2oVA$9hYQ;j>INyJgvL)C zrO_3-*_gwIS5zkqCD5~MFuWe6F9x+>cszU!rSqDY)G5`hdGQ^UWF1d$cV~Ke!swLP zN=DGns)Nd!8+*Ik3k&-Bc`DlJSxUo~4AbAlr22~5bc7^mEt{>dQh%jd0TNU=$Bdh~O8X}EY~T!Pf2{i|=RF1zS45v5bUK!2ZQxbH zn&}q%IW=;F6d@<>JI@C83w3;DpKco&&RVrdsGBF0aE0B`Hn$cP11oErq5a^uCi1!i zgL%gTS~}Wkzt>ZvW?XFGOv4x6F3Kx24K$X==PRH10=Wn^umDJvp(%hG0KE%}!_Xpt zJ1J904hb%HpKq31KIs?W(|0(~SubfpY)ijX zXR6Zd~3~E1b4-Ru+zzz zEyd(a9mV(}XMpZ_Y9xC*Improiyqe{!WdOOt5Rvkz0dyqq=K9~ z-j>#~(wz3Z9-|;BnRG|8!n~^Q09i*iU;m)4!OmV?k(KT-*n;%m3Ti4@AlQKIIiJn# z9eF6mPuevo7?=g6B5a4zvD*$2`-!! 
zsXRgA4hcb0p1Cm0gjAgJp-(0J-c_0POqH=w>v3a*G4|D@czoQH4 zk!)6c7TwMsPXArJ!obWg0MZ_PWh!*UFlkB>pl-5SMglt?^L7KyHD~vuvn3F=5 zDLY7-%Vb5ngq6y1A;e0kN+qrM=w~Zw8Z{I$E=Yo-s*_wB(o{scX7lG?ir)Zvh{~J} z!{b<04#6w?ElO0}D`YMmyvUn!btlLw=-V&c9yT)_LsTWwSf?&4Zz^t?rXFHP1eB^R zb8_pu^_4lOHZ4G6K?(!uxL%Y)LC%2HH z1Pv&S*#bz2hxNdciUi`x5{N;`VqOuoWO@een(`KR zc8%xvkdQ7eg-7Wv^8B0UUjn19vDZj8j2S$k(;?KM*L|yl^@G^qoX9Se=Y!%^NoF$I z%x0tcH%V>s%Ul-=rQzZM$#uz6$yCWn62nCIM1Lg@rSsx#$uctC?1%Ycb+R7b(?Q8R z`8T)uxFmF0Jx0$1*V|-Hp}%XOFq{EV-ohikKooH=git4Uwxt)^McnW@HUFY8v%r;N3{DNRQ4%=?j!nVFVp z*U28X=ci5mebigxedGN(4;ns7K2ko~udnVP&z+eaM*m}RZ!W111J?C z(;&Y7cmuf;$rJhZcn8|Ol#ni{I?_#PkLQf7nG4_2s|%?^{##K02NiD`QMUZ2ams);(DoqG&x5@&Nm-rlA5uIo$V$VWdDro+Yp&4kNXL$z zDWa=G@CmT8!Goc;Ar9|Pac_q*VIxwiUPEIh;hAX1$Co(wJvzU78@<8%@al&!jAc72 zBS(x3DYnY}+(gNS5+=%%RE(}69l(TWmr0pNRxsm%0o7{$E1Ow;C@okH;(b7OC#(-J z7wh`l-FjkJcj`dt{J*>5GcRxCGZj6KIj^d+oCX%9>X9jKsImn@sb>B|%Zh&5!0Iqr zIevkWYx?Hnh0<4IbCnX?tw?hSr`S?uIMYj0-J!>nN7WY9laj^G0T&-}r?u42a3R^( zWJ&w!+JTzHDf5znx1_i8Y{^(W|B|IO$#;MLJ9yVLJKqrfrCf=tqQyTBbV_OxLH2hH z9Tw=|bXJnD=RQaj{l^B`{{*xup9mWGKKEnJam(l*=g6P7wr}QPPM&v3V=P=8$UXuL z-*d?;z;-4;e|zKiIW|5T$%nTT=0JyZZ|QZjQg&MohvnREtheTE$z4jUH6CI5jBlat z7I4=!cRQtMeIZ)g5o39MDCBebyIsGj&#m-Bj%f3?Z|v>ss%XoYj~)Z|$f47fa>tLD zY|sFz6TB-B%hpQuv9)P&26wOwh}kOvE$l#_b1TU`$~!9mMA$FFH%@6fFppF2pcuq* z{To88pxK|jPE4=u!I}H*bl|w@K+svbgAW`$&(S>R8$FXX5U6Y#it6-1i49C)=qLos zc+e&#Y)ZymJ87st$3D^yDets&bisHM%w#ckiMN(g6GxG8=J#nT>?Z1%VgphTWu0NxJ?tw{ujHdbXEn{z1V+^oe1$n9=SOI8m6&R@bIOOn)Z*^e}X36Z+l`{O1XsL+4j z0Ug`2{#a#VcM>N`4*`mC8DbSA{yCxtGE^6|zaoIRcwpxsNqpSHiCz9Dgj)cJsecjH zBRVG`QBhax*H9Ltd~Wo2!csuW@l_9SQEXP3+j!Jba0LCuYN8KJbhFB}6_skRM5uT!?T zzUhVEZ<-O0$|+zt>Jl*SIQBhG;xCuFIxOm3RT0pwf6AA5+hp;`)emVBsCZ2Gq!xdi zlb(nRf74Tq=;lig;{S7ZgqXv92R&ujdjG;j_#E%jNhFSt4+D-MNI1YsBj^B4yI=Qk z3+7pCO=Pg6NgcsFF8UJ7wJz{(cYfkS_UdZ99+mQW3u|dgWa=((8t)%eW^&PH1Fs7b zAD|lFy_(x5{zd5XSpY8+0JD4J8SKOGJIl}GYH_}kszRz7$+-kcykxicU)O$@gZQuC zE(7o#C%|4Y-7;R@oWIF_`Mt~k`er(~*W}Zx7i^PA9vrks)ipJ29#Vra9?B^oQ|H}t zyzP$SGrizz1opSunXa*h?*=BzcENSWEBml#?_2b}o43AZ^|2H#3D7|Pvr*kq3u`nH zv>)>L&~NK`kKqYAj>;`H;rO`c&55Pvt$Tv{_6xW)5S~83@(NtOsS7G)kWsTEs4JvZ zg@M#-lWr9(wvo-9(L7>D7dZs^;r>p=vzW#%H4oEeekU+1vl7cz)>GEr*;=?Ati~SB zHE)1l%Ez`coGA+o`Sx!noV*!CV^-vegQv7+8(R%WU41tKEj@z?`2h_5G= zJ-4}2VR8yi)QiPUz8%loyVLHP_qgKAnS#4>DeFhV=JvJK)fj(Q|01)AFJ=2p4)-f9 z`mHM4m1WgJT+#kJ5}&Q@$t{JkZ8H`JFfeRg+LKN2acy1e^Ib*#$qIL@+O0xH zfrA~Vp{FjY8NHJlL0VK7s7D?x^%P_lv|2y`r5 zSs`Mr>vHt73Nmrx6#>VZr+U(mr?W`rKuGJRU#WC8sX>*C^Er$JOj z(#vPw6%g+#J@prfeLVbFGLKlR%0hPpd!|E22GD^CXf>(bHWW*b*h;6>M+;DIYDF8U zA}74eECy*!+NFSQ_d=+@N9`?KWR?vzMHNl6Ji{PiwE|hx=;^A|#qFwJ#LdP@#&P=vfMvOpgO}MeaRum8IQ{Bwprs<~M-Cfq}M^ zcDL)G_z0H(od;_{_u3)sBz^+bK~Kz-vGg}Rd#0w!C>j&|-g8&748h+}s7KcuN6x;b&$PN)prhn;7zb)_W0%+glCStBu6L4H zAnf3ozCW7?rbTW81BeB5_%2~w<{r086=uVMlN!^yCRr3;t32uA*f?nbT0N23Q0`T> zIYGlVNFvNUt!uPCB`TP+tg8yk_=i?dc{_yQCukM@+UNrId3W6~Vh0HxM~r+Xa7y0N zt9LX^;v65qbl-xf2`kmErMKl?jA-yi5T({0cYaHJG)iR`$7dN@QoJzQ0+q)JXOAQp zj5nKHF(&0m4LTy&H?=$P~t8B_KV@FBdwFauXn#Qf@J^ZNAOOtxv zU>Y!P+h6+-Z{Alw)5z&;x4M3*NZI}_24nTinF%aY`vR$X{=ORI-#55O#b!Dyg$_*+ zG9|4euB`OzjbyuL+aAZ}3~OsD$H%h-V;`OH8nC@(r>y2%)}Tjr#KvTXq2g5Fhy{sM1EY4_#woZH_#v;nEn12lr8 z@kQ~9%MCAH<}X)TqI}kB`EN{}8+?M**MHu3s#{GLPJ?TNd3&H!D{wVP%|cU5aA^z- zb8t}GGB=QUw-9!4RXAnYIS+ANhD9E&AH#ORwY9Xl+M5KXZ}y@2E4ohOv4JV%q#ifB zaGIN5gb5fvBbS-JwO8{{^UpP^FiA(72;=I8<`BV}>&J?~@l-N81*i4+_~b_ao3NN8 z`Q%!VI|n+rJj;6X?$}Kz#HaR~@s3e15Iube7FKsaYns}vfuy&|^^+NvAB)mD=BUaR zO9&)Jcacyj!ng?muKnk15L{NZw^jNLy=>GgCsdp zW_9~dTmFaZ2sXph7DeDlZ;)atKD?)pl7>c^@urLAI+K zP@YUgAbNv?0RnQB2b%wa)_1}&k(XbbTyiBUlCotd`&amht2Xd@Z~fJ5y2e=zlC(Vj 
zdQ?`%#YXba5s6yv=uA;%jaOGJ{V_}jFo5=m^B?54HJ=%$o$yjRGUa>$38v+TzW9FC7GMv|(OziHqzWP`uIs1sL? zQ@z8MjgH>ibZp;hIiPoG4F|FvypQs|YC~Ep)Q4u{toz@eypCE*XO&y<@nKER*!k1@ z=#Ibtg3J1g0=j*Vj;6^ZVGSu{Bwm77?Ew?6AL)};B zJvs5IA-NR4DUe`wSDrGvTWwI%;T#T%^w4iA4T5X1>yDN+T6TvT=C~m~p5A?A{9RPd zIy{lBd7nm^?;PKskAz;dYS6~i8MUV6Dig=;DRp+M%(Xw#elc?!z|4|ArWONJW7u#! zp;)P#FLblTQx>v#REjUj7*6sX=^Sj)G56ekJ*0nTrM4v-3*ChDeCDJw_>)EXrHxu62!m0yudyu2kDQTsf5r;t8?W_s1WZWh8CO*Yn#+r|g}nqU``&J8W66=nJ>|YgmR*ua z6j^vLaGLEnbi=r5z8!>UU*11?4mzEYeiYL5c>mASLZvKK=@f0Lha*%9jzXTnB?cg> zidj9Y{7@u9dZ<2rwGM}qm=-T;N{F;1Yr?5Go>)9Ao&uLOUo?ykY+&ig z*D0XVgz^eRQvOZKV7(&tTT)Cg`7)RMo!%N+5U4;@@e-6q<_d85tEQ};irW$%-sG;ip@sL_p1+vbZCFEJ!?WeTGzEo zBY-=q%+M+x6%&2_S1^(Rkz01VR$5<3DtEHi*kQ8*yHFy%gtKq#y}H9q^&P!w-Hp5pja}jYO-e3V|qy`Q9|+xWg2(x@w37Tu<*ns z`lmzQi@;ww1oeU7X0gGysZG2y7a5LS1fFJf7tYUiFVYISm`PtGWqQJ?gouQ2mY!NP zdEzAphdzIN!vO8XK)yf4s)vUK-teY%epqQ~KCMUaW$)J7S{0Aua36MG1ghHS<)m@~ zpB96}%*mnSuXhr%lB#a3&1oRV0Z4zheP7wO{e;@AIE(FLI|_J_S)hq9*0zU(^&5OE|m-|KiG6{S%U`hqbhKw(N&*eS=`;{ z@lDf!=pL|Z>o8&Sf?geg$7%c_Ktf$X6WLjd1LPb~Z^~ueVLSaoWeuoe&qsr~p6t&s z$mvOo#~l~l^W1sBxd+9jpl3mxi-P70-wF5jq`xj*oRD~wa=4+#QkGwfGtE1r$C%FQ zx^$Hp-4}IF(QJXetmdKd`!(1Ql03oMflXdwS1TwXSZ zS=pZh_%RYHE~M%gz_8`nB#D-M^L^VIx-DTc8W&$80C4!vvh;2|nSZyev$jy5t+Y$? zg5?%UU2rJmf=qPz3+X;i|GE|@-5ygU@*XWAw{e58F4OE9oYt%npM>~vN;`0kqJGxo zuCiCzs_CrqU-MX7{tHf%D|O6AJW@zTdR>1{V@f@p>R8(D6yujxvg*=!7JPRtV=hxB zTecd%Wq+IB6;#IqUwDzt2K)Z#;_Kaf^9qdvV!Soh~B#68g-i$Y9HY&jIPAQC)@ojg{;QT~ zL)2v!0kK&Baiy~txy_%$k;8){bF?7YPYp}9=h7QnI={|7k#&vrlawejCIyXX5aLg*q;KaJgd2KOUF?S017an?4 zlrwm3xK(`67g$v$E*a&{GyVkBSdDXN1$*W2{A{=gr3goGU%!mJ9ppvjDL2!4C-}0SILxciG@;HGQU+`z3vR)Uw44^g*^iBzzm1Gx zha0|pQ!I2rqEa5jb^*J$XPQ8bnYZwsbluXEE@DMhP=hgYLl1>NI!i3GIsY8wl2J>O z6oWC+4E{1c1xK`&Zy?p(g?HU<&c9_Pa?;hypco3OIW-M~G$?{wHlDOhUp8nqc^WL6 zEA!8g8QRVs7yFG2_v_u9uvnpT&+3duXPS{ai6a;()nO1m3Hh-)33a zWEdq=PB1$DieO;t!zUz}Tdt^SxwBqZ-mNl(J^gosjRI67mGsR;-)Kl#CKG0EJgWjk ztZJu5WzDz!Zi8Jph`~WjKYHcvD96iaD7WRz_Z$>heAP&6D4wEgn?=$k6GQ4pwJA@h{ISt8y<4`K@e-U z7*55!+3Xp49Xv%Yf%+#0!~L(tk;|QbHQ~7+Koi8~sfoK~fntBGeC!J3P6Yk+u#{CS z>gdX*VD&R%;H!UI+9IA~S+X*SGwsaRDT-_FDP}`yzuWGLCeL zR73zebz25JRrt`AhtXGIqh)7Qw5invo?ps0+q80z+8{3M0H&XObRsOAynN)k{+*9+ zvJBv0(niIyavo6%;zL%+++Ha?;yS0uyMEh;WKB;TiNB+51Zhf+%!!IiZc5n@-oQFJE~U`^?CrAC+6D#$D>C-L0nBZGIfws^Dz}NI zJZd3>iNaIRliOMDO!V;cKS~&U2t%LG!}D=Yde!!%(fGdFdOH7H1S+ve!I+C|EYth9 zO+?o$m?y&CIkw0fJNUEXCP-O$E(4Mzrs&UY^ZQ;(v*#fvAA4t`$ze6Wd^dpMDVvE3 z5hyLy<-)r8A>K8tNcYBTB%}-HRq)I==4fk zY@WyvLrpn4X@acBK(X>OVGQ1yTvgbPl2kbO`QyZkwpv7SeaxywSfOvpJ}vNm+pzZ! 
z+*#Tg<(CZQ#)|8(UVR(a${B?`T3aV-X8o2h;BPn=(9|N!@oBd?5c09L@HWR%P1Vh{ zWtZ-(?@hhGiy^uf({}%v-KzhG}%jX(ezDA)QZu<4CAFVD-P+jXuoPX=!-KTu0U^EIwA~n@go<;A^&pE)XLwU2qY{sjtX1mpj z=6ed7%BW|AMbD=4X=%mwYCFycnAVm?(Ho$okR#lP^Z#%Fga<10q|hfGqud|0_Fsh| zxz{6j2)~c#eDoQnE$A|swtt=Q6`k3R?#Zjsdk^>a?*So*Twd7=ecw2dDP=@b1uqa6Ld@G|4kXOkeQ^EiLudX-fAxcAcz@?Oo`dYdC)@*+%^ru-MZcvS{8wkUlV>H` z9pkUADR&(~IhO~<*t{S*M8fNGz=ahAhbZEr($ZJnWeTEozM60we!RRfS)c^7*4yPOzjFQP~LMuZs zU?fhRJ9YNd+0%zlA3k*m4Fuh_HMGa$^0<6ADG1*O=84d@Bgd5DrzQ}Toyy~yBz;y8R zkADLT!OE#$%TLSGsF%kXF!1;%9|9DNdixn@agQ%rC`Zexd>8fztay&Vi{4Vm(nb>49G15H)y9X{0l$l8v z8avA0UyJlkZNEqEl&A|QvJ{>4` z4)R<|n$0Y2HdY(S!uJzfO{koBKmG|!y$sv)w>WG$*fo&1oow9oEH)Y2H!uax9div_ zg>%Q`@dJ4?urI*t1Nj<{==Qk-sM8~PBqIhnLi;in->N1U#$vY9Rn<8rdhnmv+&(hv z=Y&b5ngyu?2iB405~^FVH~lbewK9YM-tBPk4$3YvPAlayxlLXU?Ues87gIlY>nn%B zxEYuTDh4vQlk#)2Y6%vLu>;BkG8Ri5h?xd~yd@HP0>XrWnAxyWj*s+jBG<)|agksg zHfug9Z@!-mL$kFL$;;ri1QJ3wcx@UPkIl!XVfDbDKu*JofPWgf2Iijx{@K_{au-OC z!)B0RR>YJ*La2@5wf!b)x~C7&j@KR-q^s2t`b`U9T)?wD%i_?_IBk^2 z>^1v2bk?>fN&WLol|5MfO6Bc+q+f8*9rF&9#ZDv0mmkwj!MZvSp|z=5z$0nL3ckM5Iv%wayNQge62M- z!Cv<MLAbzc=dtW{EVtrHZb^KJtnaY!txzXHc3v)iMP6(jtJ0NplhGyt%AU*tf zP~M-R)Y{4p@bqo@jhJkVa^L@xX6W-E4Wz#)n+Dw*Esp_*VszNhr$b6z6&VEwse-;p zZ>}=y&3X%Z>d?{?$%N5jvoN%_$Xj}exln#0e}a+OknX^(Cab9guXI|SECo%J3%DEn zje&+h*xwLn@Yhy^ib9#Fx2)7zMC5F-Y=41HI#{@6CH`FVw%%mw(DBQ~w~6al+b$lX zk6mi{?Z-G6oeW}Lc#HD5&a^x5hM-u}i3d23O`_~hD{r!+vT*--&6S!9&0SP?Yin&k z(b;J!ZlXPT&U6^~j44{a?%=B9;4w-aFOsX0#)B6geB9$nd8cw`PNmsh){;EobjwIN zXCDScrCPR%59J7=Q5`4xU%4t&!1@ z3mUtZ@t9#i$FOEAsyE1u@<9s;I^@IUm}~*7OdiVb0amclf%|ODOpsEWKKQ-HY4KRS zgML=>deBf1G=hT;+~bh~b$GzywRkAG6CAd>;fBvou=0$}W~U~-nZt5LNByc79BW3@VXE4_x+4}fUNJX8e!6m`tZ2Hk=NsbEmD*4uiI+v+@r9&gwFFXz+(tw`HQKc<`52wmsM^ux{|D zR?vRuLi^?F4X3w3`_)1FwFry=1EIkmj7cazWJONNEqN)?DfvYAN$!-n(|}$p?H;sW zZL%(zG8LOkm@>}9=>@Y;P?z12T(a>Nl?xa&MUP``9+&tF@#)fFGvB0Az>=wJ3t48-Qx|~cRH@B-Zo6hV_ zYdIjIhqjl3ErLAgavv=C-qXVL4>isH(YeYiXm%vkhQJQdajw2~GTZ4MiY zCngu~PCxAN7f`k$CMRfZu=X-tJnC{foJ8MoPpE_Lt?ln|-9Vw=b$Rd(V`q6Awfl~o z4Mq!qLV*%Irnzvs^~IxYR#O+{tEp?(*PsD?ejtTN5i*$qeRfr9u5AY@kc<^C4CFT! zQavee9=McpCHZ24ewAIz=$J~*?qr?9nNawc>li8qg&lVsxcg`ZHmur@6AqH0rw z`iExu;IEQ&W$BffBDz1X;SaR11ry|HBjpBVwyo4!M!A@JGmIE`X)t`O_tu16G+3yt zw=~;3dGsLJ9Bf;Fsly-_e5e8AL;rfCx>PsP-pDpL;xH5!Je2GGuB+F4wNjX?*A zNm4+P3D>mix{A82ZusB2!Q1}D|9H20@O}Hg?|(FBs=E8u4JX`l zPSrW5J%~5T3~RibTPjPd%S>y%%kg@#($_|rSqJB2<8VzF;644957l5a&}|7Tpo)WAeI1ln^f4kiIMBB%K?99Qi|4`HZxT~z%*pJS zbN~(M>lLGUqFMwzBT~FqU?^!G)T^d0@{{Crql7Qv>l0&n0&*lohNnb1$yb`EIwZb! 
zh)6S0Z-f#V^cmUN>O~Wgy(KS)yI6+x64C<6pmrlii=kacS4$(o07m2x?&M5ccrT}v z^wSL()WGCwX&_kO&``(*Kv>mDInyEszg($5c%b{Fco`q%$9E7JtvbjD1i!C4l_#RT z#Ygz$@ZvI+^mCPzd&?qZ_TTjJ((mX)zJ1}T5dMT(Qr5h%s)cg3nY^u}wNvo5)5sTd zi0Bn`hgsa)+w1KkM@NJGV>EJMY{<3y4C<5+vlt#yz4s4gh02lD>G=;RCAX^0D~EQ$ zM%6+|$1+MusggrVIr>}GVrix_rt0g7cE#v7ey@afi$SVU>EQ9q?o6J(rov%vqK%z3 z6{aTq-G&thmyu82vqjR}^~IWRP95moIZ4OkecKZ${KfuLCFjVGKivIUkVbBGnE3qZ z)Av!5S$aj^++?(uk}I}O?LWGI&s!%B)fR52%?+)!9Tpr~8}$XHeS2%@RDaiCB8K<$ zbcaXDlVfFNB~{yArghR{sDu^z>8RJ&Pfy3iE;ww|tyGa8d zgj0GsM9G1aiJO&Gw?$IVLz?^gxow(<&u7*4S8oF%;^~!@wzT*hoU4~}g4{G4mUDs* zWpjcC#K?AGmq)aVE`GE78OK@=4!N1&tK|_FWp_Qi5)d|Deb7evodFM0_gqj21>p66 z%nJoWp%{^5qV6CCB=x1vIEAPsxRHH{41Wjiwt8CagbqHN`QZ&@yfVNQ!tKTucN1CB zkvKrZi+@(H)@->_yG(QJ%B`wvxmLbX1AHBpO7E%{46Ogd-u)BXMKtr<;eyO^eINgI ztAUNsAw~`-@cTvN{WTeaULQ3bdQhU7TmK+0y|~WsJr9#mN$8~YaDI;-y^|CB{y)ic zeW0(^@NYp1(|r|a&<6AG*;MNcI%&}dnZM|fd20)}aeqJ%5%s%}_nAE3MVbl1tbetqv3EQ5l8K=s2-wD8AiK@d5ev z4pLW~k1kYq0tf5LqU}(|rvF>%u_u1Dexrm**#3e~E*?AaPfA+B>3=smHbDuhU4zvN z=hfC$Zml2vlGaU}%G{`LZ}&9SAT#YN5T$HDS_7zIVK*;^aUmka;{>c#R|S+hSOoX; zdf)rXrTv?}fWIW*+Vfw2L-DFVy!W`YT3WdIk*AHcf0+^%srtpph&VZ&sD?`_zv|Q* zN57CWgDaoNMw>l09c`vNvha-jx=8EIswH^kCT7ZR`_X59Iwe^Ec)1RlSk;J`ruET%3}Yt5#h}C|NVtXhK|w zW%l0`^aq11{XE>Z2pSi`$~FGHng(N?on9_2e?eNgR$7V|TgHnHlJI#%=Z^3e(MiGE zuaqj0oyK@csaw_3-rF0bA(>gE6hon1X+r18+zT7^fruDO5CiUhYcEyyPSLr7Go`+~ z#%7zblP+De^`&hu;f{8X(L@;irbs)rclo{-4zHYYD_PcJcBPgs}Imkrg`mhJ2fpc=U~T5$5{>i{yk5%DAMY80Gfz;qkTqiS;L`OQOH0=fOFwHm z=A;q=GT?}iS9A!}ny!}kFoA}w`l0L|!$NrVqMj{X4@$_YS9k%rTQ9*~s6(3)Dpnh$ zuj zBJM3})|8f4HfeTD%8C2Ya3q}|f=rZ^^Y$wpRpbl#L-2YR4e#NV1H$M|hL)aIMK15# z|ArseM`Q@_OL^F>s71qiulPl!bu8holk?eWS{7G*`%&l!ts&6e5*7I&z? z3yE+PIUdldX995$CF5Oy576)#HSe_WCbFoeY4z&9#(oE_`w)8IRlRuh;9hc9j~wUI zE;x;Z^n{vQ#msANt2fs9+pIWXje?W129wS~G7{y*IK4k|{sa&25#XjgIGb;Je6KHY7k4&o+qld)my>4T z(gxw+0!WhZXYX8w#Gea3c{obD102l{qJx_UxA;vWA*EE(UT>2Pea6}oX9q%t+{>oYsm zJ8DKMknvH}tdwq1OLIDXn%O&S^1G=Y}b@6%Y}?U_q@=vAThkEjay8jrmh z=bQMELgGq~daGe$so7XZow@Rh3WN1z=6~t0oK?$N(9o;SUa+Nir3Uo^Bbi`Ep?^vv z-LHP`+eJ{Z^iSEKBB!O3vy*1diHvl&;PE?<&I{TCu47Bzn{#sUy>lLvnzvTqE}n~c zDJGa=(quxl)0f_z7$1J~@aHGa436{-M1#0=Ky4B%NMo)sBvgeOIjEs4le17WU!$9^ zIiH!eMDw@?Nl{m3gXrOvehX!HG_e~;u`IX@jgYJ(|^TU05 z^DBk~AB%Jv;C|RE+0+}rtLuqHUTu zc-nxhk%4;*&l#T4tS~J8>koI|rP0BIKUBcq>EYblzLqw|C?D|qUof~q8()n}FWM#5 zGt%RP^e|M+hifcUGeF$cDS86+nh+J{diS7BA_QlC3Lhn|{BK@=>4WodPMF%ruU$vH z4Aye%DQRwG$;m$unSFwO{<7z#M;sJ)1iW$TC^&wUfDhZ>Up1f5J*t6t=JacNKG6~` z2;rHZx-=HfYLm-nbT$xI`lKFcRdeUSg|oz2a9TL>vgQeH8JACGu2)6-cri_HsWw>P z1sKq`HqR3`aQH8|H{fjgKh$r4L%WDbBr2RG{Z^p^iR%t^n~?74Nu;RyiEhgPjHa>+E&dDFvPU-{8fuG(E8DJ>Ay}_L zoiEl;*CBux)#V0Sx7+}4h3!8q)UU`l*St#Cg_Nxz8G z(if_7CQ&;;!lNpQ=)C7vi6&RR)Vs5Q_Ba^3o!Hh@HD;wc_DnerkZ=v0i(YHs%hFtU zzqE$d!Qo5+8bx~O!JDNm4tgE0{FMLV;)5EQspJASZ*cGanmBar65;`B zh*$C{nexdv$0g}7=SQY*lnrn`2IobChu{Jl{y9FM+R)0y?6fPwWW&-6)h-XmdFWU2 z3+C77cUGCUlry;3i|7Yd3b7OgvtjAsG)R$M19CYKIV|076q?9YB_)qNzP)7JO6zXc z=GAF9sn?0ODn=U8lyob6RPXB%LgcPtbE%K!voTI3rbaW8 zHiwgQk)2!%aC7)B$)dNnh`fh}E7Stonxf>PiH=tSG}546H?g>^vVhXf(>$-y^=4*$ zsP6^v@5t|UCv6eR7U}SJa5$%u?l&u?8S0zu^V?QY?zYa378~wuK<>IUSAEDPyApoi z?0{bk1OYgaLYAef9Uybl<#YSIlponS0c6gojzpDkt{RksPHKr&FWsODg@aKsj`zJC z|Lm{C=jy{@WjyMzOa~*smD!YO*UNX=s|vSm*tTglFxGdpPG=)L`PrSFd!-qZ#zcoYraEeQ?+p}yOVJH+@g!} zx%@7lXJYf==l8BFEh<}5{gSDoy~g2Sm^?;p^D`j^X@@wA%n#L=m>7@rcki2cbLUhr z7>R{LcomTD2b7h!A_XCMm44NpEtv-9e5pcLT2h+~Bg*(EXX;%_4 zeKN_W$e}@hcz~XaZSU_M>lqL3_3rW4J{rUjAPnWfS1CWw$TWDmG^mQ3b%7t1snXK>CZA`4fy|I4#_ z#$`v6WapI*2Vfnvhh;rVj`3QZ1gqxM#=x+32MO^MTnOg_;kSI6PQ#g?i);hZ_q-;A zSVD+|Y0;)%55w@NozB+@-+w#)AgaugGnOX?)T 
zXcwU6=d!*q9}*(71zr>gaj*Z4;Cr~x1xAJlfIQ|9#mfnh-bihzUU1={`(;2SisM_t z^@1}mq>99OAxg)^V89p0Idx<>zB9f%J~=zwADxWuz*)et1mGJ1)yOySZIoc*TYRr5 zfmi`3-XgznT@5^*Y1F%|Y^M<*MQJ2V(oA3`y8*l&P(y6o6{WoL4pL#_Y%&cR`HIbi zWKpl@O12TUsCr*qe#8=q3yCxU@R!|0>MgY4jd$~Wl5Pdk!@MS_+LPv^(P^3>@*X0P z$%4o`N$^pnO;y&)S)BB>pmYn&i|a#CKHiNw`*SzZ1@jP}7=gq9Y8_b56;n=i5ba`- zULcUkM93eaeWEg;hVQGl^TWdg-v@eoh$!NkUKLxyxez&6&Z~?XJRIQ>n4pt#0_h;2 zUPeY`5rw9|D+N*!TK8l&kK|7HUS_@?JPu40V$B19+QQc%L&vQ~8>iiB;YvxjiU}y) zVK$mV^ZUyQIST3UR57tPAW$N)GwMofZT+#bqotZLfgs`iux%17~6_m#^<$v)|R zeQc5%+i{@lu;ZX(PyN6a{37>fE=moA2K-0J5YM;1+Q8lAAO`mA89o?396n$gtH8&$ zb+3JjkbYYJ)T<>_d2N}y+P@`OHDK<>FV5^S51B_C_`!~M_Q1!8uNw{*I_#|1PE}hg z8=4yAh;KW*D(E@8o%4{=L{!Be;E%jcz)#dZxt2?+RpV516`-`R2yEVr81EOxYqahG z@LNEoKsRzeW3d>&hm6`(ToJcvji_lO_*$`O9pUmgSuf>vyZDVkXAc*f&Gn&IKdyTa zemJa8aXs4!$f!^AhxbpjSg9Q#ed%&}o7>S)FWL!@vx99RO{xx+!xap1v;g3GG$G9|9wzGV0_qIR0tWO@I42`W7MVPk`zYdjcV3Y15eq zFJm3)ak<-W(A!VQ*Q0JfZa28H-LAE0E+K>L$B(a7*J;(bvfZwo?zT7EZA$D#-O7vE zh15G*Ofk3VB~fD{YJ?JD9p&^o+;+;zZwxql5gGbeL)xgj zYOu8`zLl(PblOey$LeIjKRQ4}A63_KHOLx4yD!TkedY16F5K&6I;;f47z9_4Q?ii= zh>>6nBq+3(d=02p(c-tsrKpsazDegka>ve4&j`J6yWZR``t3AqRmW3(!EW+!y6y=H zFRL!4b(4!VFjl3Hs-s-jFmX}Meart<`L#L?VqA#wb6mHngt&;94&l*(A^$LW>FreyC+KEr zE7}t{JKA(uhG_S9gdCJR81jZm0DlGgsfRn3uX>a$DT@zQ(e{wrrbNu_a1b@Ah9Ns; z+dk$VBax?}Mz34mtAA_bYxHXyRtzsA3kt2xTabHz!GC7F`ut6;rImE`R@2MgCAfIW<3@lW65Ub!(If>`e>Nu)Mm2nu`L2 znloy^9ZM5jPy|6zd^WXiWGh}g_FBt1^1_jU!xQxQfz*Z12v~Oefk&t->t-(BbQazS z4F}J@ec{MU5+M=AFD|2X&ud^cH0d8O+y@W*P==1uz0jC>RL^9?Vf}tVj;up!PZ^5K z=`Es~6O}1jdDqM2=B<{-Cc3rNR#Qn-Me6%&saSimv&$TI=7F#sP!UJi5p>`@ry8$~ z80(3$=9aRW+HlH9Il?ZR(rIaA`QM~jj&B31kD!FSx%XrOu*mR_B89o+o0Oqo7;Bi1Gt zXHsGU_P%0lR+O__`OaGhH`+-CLSFHS;ygH0v~0O<2>X*`fJF z^E=I7HInv0?Go)OZHd;e?bZ%zcW4i2FK9p1eyY7Z3!9ZY>(*Hh&w6s!>RE-eDrPxn zfmz{M!?X6yIyCFuS-+X})vUkHg1IwuiQKul&*ZMl-Im*&+n(#pWpabLvD{SdQ0|W0 z1Gy)1&*XlX`)TeMxnJl0E%$q!LN`lygO1Qqy1R69bPww0Exp;G)pw?%OpHtqi@q2w z1T`Yy1fDP-GoG#cZ85xYC0xB3{=(~biB17`+W~8%T~6fGA=}YuVd%}$ZIVJduts{^ zYh!Gj1^0H1FbT3R;17#*@?h_|{m&Z$E zg^2=OTBy?MBLS~HLL0lIEfeIi$s_v$w4X5zzHDf6**jWEhcg&<&|SvPo;q?vZAE?? 
z&4|4%Y0@wH10o&njVF>KuG2$C4#5tE&j)GgMOgIpYJ=q2tPx5^Ts6`E@| zjjQCF#@a98#Hrg55~OF}p7+gP-@F7@!_~_^eX!oJa-Ou7hL6C1>EWW^Nz%=fHOY(0zOVjhgdzl>F|*g zl%P7ZXM^-}i7F^qmtS-kQndC?L&r)_Cll^G-+p$3L3`7!2AyzAe?G)Lvo{<1V*?8` zi!@sOtp+VpRMq;DYPq&&17BT0NJ}3yXj|VD^~XFr*<&0IZ}{^&@ZL+Cc2sxM)}Y02 z_N4-mU^s{m^bGF0NPNETE$Qc_#SBwNSvUvhVeyE;%>afWZdHFJ*GLTyfh76Xp|-_g zns))WleleN!Sfw8qTr-5Lo@lnh!7)>?`~R?^|0M;Vs&Fhqm^Q!wO@|_EP9{xhy0nYOK z@g8uXhX|Mx#mNov;#isJ7@arvaC!}%Ov`t!V?x01i}zpXf2;p&_wntQd*8-+pdt7M zP%o77o54D9D{m5ve4DR8EEX&9Hn9WQ-)7MovQozfgjkUFo_GJ=e%S`Ex5Mil@CLSz ziF)IBQx(IQXpfh5F|N8rRrgmuU~jZ-v)4IVoJ}s1tIOTv9CQviMx6ulgKj2*SA16j z@s@*ltBGUHw2O1ezK5IP>~2D*Pn=s&Fi!J`DlE}%Hgz&o-SFDu8L^ETE@T%%(HUaKQ~+c0#hgqJej+g;by`kXo+M zF1y7b{R|e~VX(pr8Ka(MS*EkTWlQab*`Tc)P^Ky+Xet6!M_p%~wI1h3`H2w%KBMkt zVxAbqiHx8Ocep?q^0ifbA-{1gAt9p&nfrmHBS{Lqz!#x?f*^M9=-b`5XZG%QlwkYo zNHg?C2crEr@P&OLimTx&xpG{3LTwUEK?h}x#W-X`BI9L0ID8UW?FU0dAS8%BLI8eF zq&S~fPzJ4~$PRPb8Lxwu7f}k|dO$OtF-G;JzgzqFhvA&_|2p%}9T&xj5E8<;udYe# zByDcS%g|m&M{RqRxq5cP`leS}wzTePIn;c_GT1rXK4R~7%PEnSbzW~P?aEdp-~0Hl zrJo+VbGd{sm2~Up1d$gR<Nhdjb?PzVR;)Oi2CfvMr6!zX(83=|H&(qGV5+*8)Q z1$V{cY@8gL0O1JD1Hg*_ZK$;Fk1&zBMc+M<9_t$0b$;S>-xMB*M8zl=RoY|+D0xFWN-`sb6^8LxTaBUB6_Ju87 za$aj+Bp&n!a959~f1J?jcMlE@rbqEae`sioc)j`qN&D#Y)m0soN8Kr!gBFSr1Wq7D zIX#o^8J$WVB2V{Jl&xG|@N5I^wtG9hopRVZ@5cEk@;u3pPc^(4qIoxP^Dcb8u;#hD z$l|g{vWeypk{E(@uy!laDRjhblr=GKJwZb9``^MlFRefEXo}YH)CBKJYi;i``ZWfh zTpF}c^OWXF^R)*WI=!S{|KyWfqvaFDba&HG(`a3nsmq)+0XQm5zScw%SVyPJaTs%NA-rCg~_!ldVL zFJdS!@AVLZE$9?joIUyp3w7j2qrA{ZCxv)V5Sgza%0_U`UgIqxpL&27R@482FTz={ z{shGFP}-MDhPZ&=s*#SV8jOO_PTN?<#(EhG+sU-!x|1Ki_wBKZfnYG|_Zz~3KY%=# zfSIT4oxFu~x&TtwOYd7GDW02yBR-nslCC(lzR~M)(tHJ9yoS(z5A|}@@~70F@E7>k zskNMqx7u-sg|)O2TBPm%OW)Y!vbE5I;3S|X*n}rK?S3^?fd(70T$QYvH}P#Xgmg*W zCRqGdN=`0RTKXrQdy(xQ=^mvg2KOF^3gl<(Lw?3UOLd}*td-tF{(#T#pka$Tk?aff zkcZOsk4x&URVB0*z_ULre8|w&OjNcsmwPGB?c&^IU41azO8e~&(aFjgUv`VTwVh~; zoG_gu|M=12Z>HKOiYDor!Gi6LJ$O^MGnppFhI{vQr5I!crrk*{!Qi22us2S4V~qo= zd^j)H9wN2+^T)@&8KixTY3OCcy5`3E0&-L1@QG$R$S|Utw3r&uCNg;c@tO-9uDe$w zJq+)@!?05Gf1h<|U7AkLYX9+hPrtMQVLD&45fpMIl>A1OuX%!9mQQ3ZAzQ~~W7=3N zzUwu6U44sXjfr+3Vmb*&xGlj_$3mYCTqJf^jTS|yHh(i{<8bZ3sc+$&k$3Sx!XJqd z0vF(Xl+Y~K=C>jj+eT~e`zT}6GYn!i)Y0r|@9fI$>JHKBcT9V~d*gE1x$IpSuAdv( zgSObIKB5QShP)tN6&9jFF^=;p5wT>9h)Shub34zQX|0^E*W(W*{Kt?@4*;bFd<$)J z@ov)V^=vd~)k3FW5j$~a{5X4#JgE(L#l8?u;vu9hl6Er>udGpP&8_9_ z6#~AouKN#kL@TY)`vXBa4g3bU5!PmA?*E95I)YZe!RZoNlJ$73F8W^SA&I$i=lZ$4 zWzsAnD#+VSpp!Zi7o%Z%jN5mT&<4n4S5G2LH}BTg7{Wi6CZ-5@w_KW7Et& znPyHaC;R$r(+SCi)=kZBO$;fpNOVw}hy_zZ7Z*n+N>JMtGNx>lD;jsl$U056TRaX3 ztvAxgg~{&T1lsOowW8&FsY||nJqJl`hsM#{l1&pvfDY_1~&*OFMRDH2RGi zt^4>P?`iVHN3qvW({G<2{1Y?~@Y#HLlhmMXF*xOn5N#jP<=8yFfYeGKWK(-T{e#<3 zZ`icx4q7|JcSk}ov=Mjl2}*lpK!|AdJJa2}_FB6tO6-jl99_-@O?ydQWtVyT_U_)X zVeRv`5YnbUmO@NBoKJ3CZ{50u)_R;>G!uz1L2Wo|qIe_O;>aO)kPY&)1AM^m58=V6 zZ#YT-t*Va=+x^rw&cr#9E8E7`H4)ltx#T4^Omn57hTpb<&t?N z(tU*1Z;rP{oI`w%5aJ^~ArkD>`i|qg5BPk4O5 zM~hZirDa4`o6%VeS9Gd2uU7pj|1N);TBj|+oxvCrCAE9;kjo+3NiAn-0WG99v~TAD zl2@JR+VT|5Y5PE5xkg)VHg47OfY%};PO$i_l;~1w)&0Gr-SW_^nHco+yR`I`>s3at zoV}o>Y7E*sP*YAwm(@>b1wqi-J2^ACtw_`&HbRI346mgV0WFsOy*v5eSJR+4g8c%) zqd7;g`*N0G1oj$si(&!xB!b%%Z(t88{((J!9l)+r+>YIbj&~?FV|l1OrLbbZLdV+> z+=t*s1P>rED2B1S(D4?zU%7quTGR*XkIMNBo0EgQWAxqQii_B-IoD%Ppne`xv|*3s z;MmPMH(`%q?_vuTKf9Vyyort`Y++6uyFF(P_CU@PsLv5>LCzxVR>f)bod?bPJeHet zH#T4KXDoNd4D2SwcbHc34uW5!Ic~+O6w9#L==cl80Zg4;7eJq%#ug~*v6~fV5QqrW z*bRyyOoP@sA3Kjdg1vL~sDj5H#m-^#b4IYaic<()%Z~Mc;w)-^A9}w5y?+vocLsYz z!C*hhDa`h9y~2z7*@kfO5Y~Xbj@^saxm*F!e2-vSEQ-cohu{7h5pnGlb)((A*azx~#$~(Y)6w4r2L=d$9^dHCjK5 
zZB^WetyGj@i)TEDkvYe)>vL9Pznn1>;j{rWpuXoT-ox(5*@x!+9irLq5uIe3@MsMA z_z&!boM#Z8e}-xPCy;5Sup>JC8z4H}fxUb6ZwS6d@b_ypy7?O1^5eNoCz(e74p|!g zNGI?=fPkO{UCaM>KyiNyDV*9N4)s|f=r(?h|d2GGHqlqV}xQUb|>OJ z4AJ~%h3V=Q?9$aMh`#rucF!m-Ui~kH8=E7yQT!8|jn>bgvDcyFyJ(F*G{0s91&V(n zKKTv8-x<{QOYBk9_HG3CC`#q;&^f|u&J`p-x1;$W*^wc~g^r7_exR6z;10w)ixK=C z;k^rcB0{`81(K0FvvM#zk_LvlR7l3R^e>!ycQFhpxX7$ztR5ct{===6r=@^z_w#Wa(Lj z<|5Cx5#i}Fnj4b4oS&lkRHL{e*4`=00me=ZQOkANf}vH5i56 zhbgLyn`!K>MC6jf!7@@Lb(FfCVm&&eKf1n09Yb1)rNg#8SA`l^5}M(_-R zUm;j>^)J{`bXj9e5v)e=($!z1k*3f{ zQ)r|qG}06rX$p-rg;tW2{e@oa{{T=+0|XQR000O8G%v_HX%^Im1Y7_B9BKdn5&!@I zQEXvzb7^ZrZ){{=R6;IqbY`Tx2bkQ{xj(FR*%1PUmH}G=X)(rhS1@2>1I9Ks#=YR~ zt=`^ecgjv5X;eniXnHT(d$HHOcVg2+O+rsk$R#%k(e9OV`OYlPz5MTezVG=z&o|nm z^PZwp-|~BpziUnmRcZa>R%ve@}aVlg5qyytB-)P0T33Jfs9=HWFw$| z4Z_T_YOOvpd>?4LWEH6Y)9M0!9rV`?peP3hx7?b7>JkWFlK|;D2%2K6t8LVNe+-%r z#?t}H^Prq;)#G0VAqw>|eAGZ;bYD*4fA#+q1l^!>pj$xd z2YM2uf?m8qPljeft8UO!epF{F^bk~agU*F+hWs~Z6|?|K-JoIU4(Pob^i4maZ-(xI zzP>@<0%^e5exPstQU2{e>g$e)@l1wtrrZQl6* zp7NtQQ=xS^U*Di}p}Qx|y+Ny>{gYPSpkZjnq=6gsO+TV_6|?m~ z<&9MZjg94%dCO|6iZ_*%HdPfg+<5T6o?Ddn<4TJrn%P#;&{$qulQ(bnyt(rhZ`hnS zckaB|AS^Dc)fUy(G=qYf`Md4T8Vg6nDMaX{-PuGu$Q{8xKFw7e1U zV)p4q@N6Zhy9}xYImOT>P*w`s%9b|#j~@Sbe~ZAA|8FfV`q5~%fw~P7HEO{a@}PNO z6!SpIJZLcx< zkO$(%iIvQo7=HmMDFOM}IcDqDLi_)()_3;*MV*yk4Of6G4|*8%k{y2mDF0#Rx``f3 zphv*`wBV`(t275(|6eo;uNw0XGesUO+J_RlRPvJ&zq4qD{pSz zqP*pK1NYCkf7XoL8F$V=XW%m)n~^`GFJN~Pe;)z~z zzW@IFub`(vfWH3%gwL}(32f_+e&3(_{?+fD6O`@yN5Or4mi{O4uktTfe!1k!hrgWm z<-K3*`{L~9pry~>{QUIihd(!dZuljxdVlJFTeZC)(Etb36O#F&<^Mt^aivO+68?Gy$^bF z0SlH7@P-rm1M~^>DUgW|pq~SIY=(XXWU~h1AR1yI9%3OkWPk+73yF{ilAs{u12!}O zU4TMR3<^ULC=Nv-3XD7hC4ogvgFUfXYli`U907B-K&QbDo`g<8XP|GPe*-&fg%Z$lz(wbPeCwb;0%>o8{sg4>8z7r{ zz-KU6vwdLA%At>-kAd#|Gg!CDKTQ1l!&xl!zkfM^Wucs$oLhcmXR_%B#%_nk=I4wp zm^8Lv^4L$Nyr4+`vGG44N zq_J@#qqL!xaVkSXC4kYD3{z-r<05d?HIe-PAzD3Y(2=EJF48T)>g#d3t1e8NVK$M}qIXO%& z;61=hKo+379Kb_h!*e`2fC+NEle6mB4FUE8{FW1(4B2u3a{>;|iB5t3Iw`xa02;jn zZOPd*37NEc()W|`$y+CjQ|_3ua>}MD)+zLq^HZUzHB*0^YgXN{Y^LDB;9oI zrZ;c;5?Ot7{mtfEZn@>QTejY^~6>rX#9`;!Ylxvbf!iQRqQ-Sh9>boaKqyYKG5XYM^u-a}1uPW#KW@22mX z{u}fr^bC3)eI0ul>$~^Xdo}mQ?oHme07voP;eW)xC31<)#2dXM8u~KOaCJ*!{q-XRe$1?#!D(mz3`QVsfB-d`kAMtXI4Jb@$Bd#^&-t8Y|)HGk1U$IXyKw~7C*B%`CRpqyrrt8 z3zqgSm7gb{KmWpgFGOGX+lvJ+URYMKEVk_8OZZFXm%d)UZaKC5=<;ut|9!>ND_&T! 
zZpF40dlg=t{p)x`09Zh$zuv1E9S-(;2tF-z3;oisee%uxRk*{ICX@K8e6oopglD}9 zF3k{1_)5MWKKevIaxxr8lvn;%h{zA{B}k3aVX8sYRbR@xHPn7`KUE3~0Zzo|c#H{R zy@3mdKT^>rJ zq=rmms=mXRv?k~bEUFIox1R0lpqy!(@%lM0=9S!%p9sbMtr64_sZ6hxg0gDb`U+PS z!PL_=Y(2a{pTA)vS{<$&D#jD~c3o$)ga|Ar2&_a09UeMj^N}GNe6l!RUxsck)otCo zFWy#(n+?|eCY1D%0Vf`}95kP%?aHKS*LtNsLhKSd#ZLJ1%=s%Hqi6I7^M`S@Y#l@7 zXPt=hYxy0Ga_^Tc0T_gF5h+Cs|ou3$q>Qi%4&JLTs=1FNEhM|U6{*($QR_T znvXMS#4z{?u6%7pWYyThu@^Oh&CNQ{eYS1+^KfgF5iC*ox!*h{PrZho9cVq&)$M5N z#bfEvfFJGJn|%4?!$##-&nT-Jsn6!c(|Vov7`1!S9@ur^+wLX;kh~H1ZR>+wr<)L#dPabm2MWLFFY> z-5{?U+oO4WRrH{?ljx%|j}d8V?Uu-MX6i*GI{ zS)kmH$~lZiUOf8d`7^_>DNicf3%Bjqw*R91BCdW`{^{7)8mDBD>|%vue%;FrR;%4c z*?6nh?6V|2KT946vdSD*d4N@Jb}~+uV&OWcmNGkdhXZX;o&AjGFB783doW*+7eYkT z%Lj%~pWdhU;x>^K9U?C;6l9wqPlDB-jWuex+<5!x*5lae&f2Z3H!fK=pQtIXEGpY; zYN9QSo(E5O>@a=pEBDvfiD6M_CsIPJ-*W<{v~JE!aM~KS7TaCq<_n3DQysmXy`BB7 z9jUItlbvV1$6?;j2Lk9 z?owF=qtAkqlEip1pGOcqM5gCp^e}cNUQxbo&AwIj1Zk(uj0t9Vpd4|Y&5E>;Nb*7K zy|dcQK7wc5G|lh~gDw_cTQdhMDU2km36~LYCTbE*F^e0q#0ISwF`1Baeknin*{*Yo zQp9jaTSu%7s7h9zq)`x6PaiKtiEc@X0@0W(s{B5;;3vBM;gmlEdpQr|#W{iVuo7&p z<=94o|Ec?p*U>+!`uUMQlp}ct6C4(?1#6;f>_+37O^QY_R4x$>{^qnEb{+qm`4F>k z4l63jE={wFE9DB=Qg~MZFO(8JZ=e3{xp!f%*3H*iII|g5k3nNynvfv*c(3R3H0BlM zDc4IFW2Uuqv-Rbc$DOahv38DaBYN1w^spViU>c}gfzDmGdSOX%Fjz-TRn~^MCrKUq*Wn-tP|j(4@-C`Mjv(+m}b5OPItK!6fJequb2G&Rp;Dz-j;C zYy7vKAwR5cm8}}(1@WWjWf&~r)o*0%Hzi*^8X)*w-bJ}dtVu_cCPMEZbq#2(N=kM6 zVwfaxoChfCVMkZRa`f?i+qbRFzbyZxPwesZ;K2Awyf^r>%-Qad<5xdQ{>lY=!a_WP zc9Uh^V(it0jZdv2_HL?~M=B7^YTbGj)qJ3ma5RziCX^80IY~UdJ>8`gQ^x@`Qmy35{_b*#DvociCT2I7`ZTfC~ zX=6=;K@00@Efq!RGbd-ukH~kt_w&EJ-f(F5L0q{_p)}j}jl6Yc^ziXsqN}CN7;k|) znnL;_bj{w%J@}Hb=QS5cI}aThuG_M=x~!lE*G6?wV3wNF_GAyL{?Mawx!j_OXc3wN z2Ha>`l`hI$Ebk&4O99;x+ybODmRW^#;9!j{~fWvq2^t zM!VyKeWUmyMe0XRh2n^u89TpE(=0ZU^`r?Ybrd>w0^2)l@h^ZzuKV_H@?!b!cULM4 zl;^f=FQr|gNE3XVKiG=$0e2)2XfP5i z!_hQKt2mxvQNb=+f>t;Z7Nj5%OlQ0qY=nV@vGS zTDYuEU$PAqW}VYrW8q`Nu~3>YM>Av(_N#w;TQFvVDzj_pD42HrmgRpE>^|Pl*0MC|iY!zX5SlopA z-LZQa&Dkz78YZ{{+_9^u{7col{8j!WzNVS8S_r3+F*KseUZp}VQVlV2dmP_X&sZ!3 zU&8NPhbli;t>D&jyYV;D?r@L@C&XAgDswWTI^OH%(}WUH=|!Wr1y?JfcNon%En?hO zw;Bn9^v0f$#%UEqGeJ8j8)pzrQGF{3Gr=eu$NUku5F|Q0X@5KfUp?giP`()*dqOqD z#T+qweI4BxM5_3Lt*G*q%BL(@>nZ18we4q&=6t6R4G_mvtz1VxDz8w5NXgz&duq#x zgezEy)n=YUG*;~Ip^B%(_I=p^Z3J8)H z4cG!rJZ{V-NuIj4lyTDzBRaf$YmH@0a!luXsfHb?l z7{5)!6w-U>0;96lpuFN#?uKcR@#9P)D-oHHkqun-i2u*dD;McngK|7ZR$gbw#X zei)AMK#8@hyhbV!+Zid0l!%T%gReHAO>azX441&EHeLj%Q}lbKST7=lJz*)5Je@k5 zI2j$uyq>(k+uc^)3R`)b73C}3B|;rc@)n-Nc@^&jMRuW4XyBW8gJ9%Mpqibx<2=bb zov6`c37YVq_6YF+L0_f5u>ZOTvB_$?e5*rNv+Zoy6@zQ`GjucIAc2qU;`wPDcL3p9 z@{KzTJFI0b1(q_hg=!-8w4Jdt4wuDcc3H``X*@SoVEh0r(P1V`N9ZsWW5QGd~OYE(@P)ri?`f*3?{ zE5f#;OuG#CeDGIJE`{Za#qyoX3T5dN%FT*FS#d?llONOJ!oDL9{K9tBbs=&&dNFb# zFv<)vU6zC;lyQq5zv!RVlF+u61>oxs%Bbe)wO;`5Yr};venX8f&7T&6HabE{&d%-Xl15o zl!l|Z?vDjSJFZye&os2d>^5UXGLs9`DWAa3jTA3kT31qIBh-r#H959u>=&8{qP&?~ zQfEOdDD7qiC(a}jTnv+Mx;~U^@RHw0uc5q zW6-!pV`3PFra`Yxj;c{|>y+GvnT|qoKUL`*NR8flZ+&EIY-4m&`nmMe(QTqtvI`W_ zPrU0Nu{Wf&7!$mai}Clvm*dX`3Sz6{YvZp(wh4AHN;@pLL_2U$_-Q=NvpfyEhidx- zJR^hx(E!g2!$|V*QF-~WJj>VX>+^LZDt^!NF?eI2lr<|AIzr}Ee>f)(RQ2TuZr zipI=Nx64JiNWf{pBu;a+=;k&)v|pJqovNejT#a_aB(&5 zY9wr6TU=x3XX1j$nZ<@7M$=SCE4tm|32&}Wn`8k+Lv~vM05)>l= zBHoud*nTL|o*W3Z`D6aLFPy9jHO4EvR>|tM@vOk`Y`P>>nka=Gi8vF(x`*A7C?UDs zoCjAwkEowR)OoUD{D7wTMKA-x<=_l9lvOZRS*_f*mD~Cns?1dFaqr?waUsZgFy`<_ zu8**^SMr}mjZ(9y=WV>r?c`f{tJ}i6B20!$!`ylIJMt3r8~$7VU7SDU{<`;{!av3_RQ=O41>e;hkWRHNFaer33&x@$L8~jXrlfm^|yId79{YxJSw9dU!<1 z8&w`Wpya`?$Ap-l;3c=mgQn;V*@@T4ldI%O)$-(;iFO41`;V>$u7m)B${ZD`(`iToVgV0Oe|yA&byK`-o%21*=QPmOiS9 
zMBM2S^!n0Vi`lK;kE)eJa^BczC&Gj{fe|O!RA!_RzF^!?; zg#99R=xm_pFmd+4iI3#_fKhny^~d&ASX*jwd8z6clLUr*M-sU%sI-F7Z@~WmJe63w z+n>R%4DHWT=B~|uO>gi>Rzm&gdPHp>d+^ViB!9eh#C>1{mA_COW3vrxPa^lLd`6w8 z8r!j5SGb=j+*7x5KB~O%@#ep|@NTZt(;DtUf|B3sl_KU=V~5bow?=|dSmHbQC|*U1 z$K2@5^O|P2iL>EAw{slM^HW#+PeC z_PXFcY}um~3T7iHq=hCqlGRa^%e5EjY3oM3qR96mDW5kao`*SWr5*7MrAD!E)Q9*z z9*=-{VZU2)`Ed2Vv0LOfHT`6JO9#${y#PtKhf|$VI!yZsdo#xY44K=^)pAWhI(eXI z;E=W|T!zhjag8!{ZD#k8I)bKo&PAKd`WCYpcG_JeiSpGQb*D_QyVri4#=XFE!`ir3 zu9X?2gGPTd%($Hl$`@{bb+rLz3^kMvv)BbcOVG)oq5iI9f*Ndyyjn2pso)N{B6!?( z8tWhONS%ayd(Usb{T0ew$sbx9b_iyGcv|=tp@lCNr&WlCKmhgdUXKS)r>C;vFqg#s z@#fWk5X#E$G{-th*LjIoCByE$*v`g-*IdNU$nL>|nA&uS(Oe9>qe&tV7vgDDKC8;K z_N4}~-b~8|kr3=|ivd-fDudVp;PKC5Vgw-YVfTsisQjQx#%eA+P^Wc~2HZp%oh`Nz zM0JRXI%D{bN>@EtxF)`)5WRk@Y7w^zKpFKb$b$w%3CqM-t!8fp(-1@|_|k2t;!)Lc z)m#(KY;jc9Sz$+W6J=ysp2h6Bl#MYtP*E4Nw$U&X2y=1F8*uY};*ivz$@C|Bf^UeLI)Tb>sSdIcDuT~6AS*93ny>xtVX$T@iHw?~=CCznlcx+LykLoWj%P*rE%5Zt1>Z~gckp0do zvNnj+P37zOnsQY6r>c}wZsW@Fzsvur8SLmvq_DWpq<<~HV0#^*Rub~#f7XlzRA;z0 z%8Ok0sdPbI%7QyXF)E4yao`cPTrozZH1c0m3^O&8?n?GzMfW!6>5H zQQQowyu$9lPv@q>Zr)3T0W1%r>J^B(Y3$j@0XUN9D4R4w1T+A&jLMekpa!wQgZ|!O z;@shpPvkpLdG6w?4>uZUQitnEFdPdQjwNWo+%C71#hV=_lCr}-okv@RUN2B>cUOo; zoQX!+FxJr~Nq*v8*{^AA2*=EXJg7>fyAwUw#s0zvl?CgI3JA4h?BTInH3L$cFO8>n z?g(witd=ZT+({KSSJ%PY_g1|07^*z~>-E1M912I<@tvyG+-7bcej)d$=<{}=56Eea z&QxpDVq5bM%adgE(xnSToJ(>k04`meg9du{S-ElSUX3WaSs%gqgIov;xZQ$}h^9T= zF_eiJf;FOEY@&^%o@{pLEpU`_c$+br4j|5>h!?Chr!1bjlzW-mhQAr(!EOeC4+NI_ z9aSpT>g&SPwMe%JRd~ zk4Ss`5fQlWcjX@7`}xCoT(zFA*E_XbEmOtSQVhVTtc5nwCOhnOa_$;5Mkc5(;KDmp z&!*;o+kYSsYr}nB$t(C^-tP_t&;Wo4UR=EdQ7@C%%lkCW5&NiPgzO?a$yQf_jseFe zM#gL*cx{8DnIt^sL1l8VK|&5Fw{$LUeGWEDZMF<1NN!#v*j!!^+&mWZBt7Bq;NQCc z)b({}C^!@x>_L33o>&+a-5$!1d#YM^8)l|h5~y-5S?{c)^i(6&WX-qlvKPAaEqAn{pBieE?yAYG!~9BFh{F)}7|n zmRBvi=p^tP2VH5p)zxacYQADS2@HME7Sv1YO$-e?RGizzQn;(JO1X<_wCCFksRpu> z3tBI`+FV^!m;I3au;U=`1*3L9yspO4;vl@1*tXVZI_CD|C0BbU#ylbY9ag0ARu^PsiD@>g4NM3vz?$E zq=T}93*0Tu=2~kL%mHwLTGLd}+~EX+AVVgE6T(Jaw+XRk>@iaq-k+>; zpUr3WI=l|aE;(VUHN{4-4!%_gd0_P-ME$_2Wq`An$a}|IHLtX|O-ABB>r`(f+|dM3 zkR9?pa;K(vzrC6vxMsG7t#r*Nm7BSBaJ{7S8S%G|cv}U6{fv@v*LUm)H^6nk`~ILg zoX#9PY)TjIvexbAh!QTKJAtX?3uE7EDBv7nl+(>oL^W+A*=87+G0u*2cDJ3g!)tW& zHzv!>F;m%vOVK z+TA6q|2}x-L&3{&Ko@c$CP>4ql(q4`fFK2cAr)JFs54#_EA&mR0g=J!jddC1m!)lGEy8G0Nevkwo?OU5j@;`=Ii?a+9*B zVx3@1+QK9cAiRf$)gQ|P8q&^I3$^{?5ou6_m6dyt->bUCHlbS_5QPC|w1O$7%NfBh zhD0gU)p@e}WZ#8>tFghgNb!^TPZmFEYtwH;I(8>YLbW}}a;FXWDRy$YTKV~33&BoJ zmrst3YPcvHp(CsS`~X2X6h6^<5DwX`k{L6ZNQ;%IuddldZwLH!45?xfWj5c5uoJkH zC6W4?n*YM3k)}=>=R7{Qg!OhLGhRZi?UY%KQ?vooc6j|qd2RdQexYk9hz=}}?`&xW z2$17M58T@SsSJ0c{<^_x5x3|H)|bB4P^K-n8DV)jqoG7gz>0^BF_Fi_>O7|M54?5*5evD@5~Z9w+O7LAVy zxI#D?v`3ubeOK3gZRmw<^e$ZkTKOjhi|KHVqg^Zm*A_3%t3hi{>ii7u55?Nzt%onY z{&~6|R=dYE019!|W_Q!}*VxyvR(FC=H)Zw@;N;QUk}3al)&;@e@!r-UB*4mAU39%c9C>Z#GZ2XfC9)v8W&?Nkb90uHGnto2uh3PH<{F^HXM z%C@{0>2A*gY(byJ)CaL}jAfz(U(46+L)DS78FHznKasp75fY!`qWJZDa;q8*+c<)8 zYx(RO*3Mx$F!9-0e^8EVHni5h{B^qh7*!hTUwu)u3&xI%M&{l08Nbi^aMMa<)qhLHY=W_qk0bw4!Yy3ve0r%_`U903w%i>#&MIM%V(|uIAzd5)mW5V5pkE7L;y$}Xi7r^*Fju*$3auN_hekM;AW4^ZA*$a)x&z$`=#oT`m@W{igwBSN6Qi zt=GMp5YUmq?0J{lemL)PxC?QLE}$3@`BL`0OFE4h2S35(Gk1TS85tN%RV`l9R90E8 z2YLUz$%-fseI@5=_V2FPyBb|GvgvPU-bxH-@B@aVUcmL$4P-M`(U3ZPu&uYPpAbEQ zNA$#6W0_zp9LRW5DKyGPnF!wPY=|^r#wK7&2@>oYi36Bnv7)stPkJ=`Up!rWa^r@=JzMcpyrw(d+0&n@ zudUTLRMj@M^#T#x{fC-0h=Ry3j2&Kz$bTHq$SzI9?@sbV@IRO4*4NSICc@(23}xu} zi@BX0LLx(`Wyjcl`E`waUv6(Oe$7iL>vFk4c7W|?``v?FKm1MZP>&#Hh@{__;7`NK z>D(r+26%*7B6{bt%7Y=rtW)W?_*UfgLIPgvM>`2unDY73IrtpEJQCa zBoDTqKP#(X`H@_4t!KSwEl_eaPlC(AlQ7%=I{g-QdW7dYiB4ZA4iGR`vCdtHRn@tL 
[GIT binary patch data for scrapy-master/artwork/qlassik.zip omitted]
literal 0
HcmV?d00001

diff --git a/scrapy-master/artwork/scrapy-blog-logo.xcf b/scrapy-master/artwork/scrapy-blog-logo.xcf
new file mode 100644
index 0000000000000000000000000000000000000000..320102604f4511d26094cd4c964ae767151d880c
GIT binary patch
literal 52428
[binary patch data omitted]
zPYb_}D@rA^w|#!|>r4xs#!`f6&b8-#LPR~3`OoNYl<=H_t)`Q#eJ$e#25<<|^`C!Q z-~U#qckqet!rRg*E;Oyc}VT?etZRn0-yM7>u0*~M^&eBrN@*l9**lk zVc@fWRK6$F;dhbC^L1mBBI+9&Ih2$EQK(Q^OLK0xt3)90!n_<7kBN%b=uR1JaH_4M zyBNG%P#w@DajYFJQg9OU)^4Wt2c6Kb31$feDWOj0(V&d-hsa1sUu)bjH@Y9#yrO?q`9Gfc+qVcu)=#w;^>`-)N5bzGes->SGLa@xv6bP>a<~L4D>7al`F9gn z!$;2_?(tA`C5)Sd&jQaVt;pu++CmUrNBTczk`E`a>uZ2|N^+`fR@iX=JB+g9J)q&e z3MzZrC*NfOng=a`D^n&-;hbd+h6fUMCUfu%hx5e1-@){bnQNYy~Uz+Evd!%^9tYjXU(3i%cFq zDAn`cq-JyO(Fg&z zx<`n6)urN?NIkpR#$m}%e8?Y43wAzZqy8jG>&MG9$L{Ztq%ULJ7iths0>R7ItjkI# zR=ve51+}6o#M&S&9pl=fCOje$JVCx#*2|t4U@ve1n)4^p{;KG-)U-UM%~jaw8!hDm zrt7hY8HcHmK&ubYuo~DCVVXL?h6U_Bh6qaX+qg(n)mGg2md^{TjuD1Y<16XM1M{VU zSE_8*cL%^geVG3X1mOdKfChsni(EZb6~$N#DRsQN!cvrVk#1PTnTl?DA`ftmn7-{B zY?AvDeY0!Su$(}2bkZM34Gu83AX2(JW_k2Q$p|FQC_Epz5MQ0go#$D1$mbrYKD@sq zA*#MhTu2{M38PGIwkon6ET1>w0uf!64(xz~H3!0tPR)f^ZlrB`y?=A-gxrHN9q!55 z+f{ld3*bIOFvG{m~||T%y?JBV!!{;AGD' where is one of" + @echo " html to make standalone HTML files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " text to make plain text files" + @echo " changes to make an overview over all changed/added/deprecated items" + @echo " linkcheck to check all external links for integrity" + @echo " watch build HTML docs, open in browser and watch for changes" + +build-dirs: + mkdir -p build/$(BUILDER) build/doctrees + +build: build-dirs + sphinx-build $(ALLSPHINXOPTS) + @echo + +build-ignore-errors: build-dirs + -sphinx-build $(ALLSPHINXOPTS) + @echo + + +html: BUILDER = html +html: build + @echo "Build finished. The HTML pages are in build/html." + +htmlhelp: BUILDER = htmlhelp +htmlhelp: build + @echo "Build finished; now you can run HTML Help Workshop with the" \ + "build/htmlhelp/pydoc.hhp project file." + +latex: BUILDER = latex +latex: build + @echo "Build finished; the LaTeX files are in build/latex." + @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ + "run these through (pdf)latex." + +text: BUILDER = text +text: build + @echo "Build finished; the text files are in build/text." + +changes: BUILDER = changes +changes: build + @echo "The overview file is in build/changes." 
+
+linkcheck: BUILDER = linkcheck
+linkcheck: build
+	@echo "Link check complete; look for any errors in the above output " \
+	"or in build/$(BUILDER)/output.txt"
+
+linkfix: BUILDER = linkcheck
+linkfix: build-ignore-errors
+	$(PYTHON) utils/linkfix.py
+	@echo "Fixing redirecting links in docs has finished; check all " \
+	"replacements before committing them"
+
+doctest: BUILDER = doctest
+doctest: build
+	@echo "Testing of doctests in the sources finished, look at the " \
+	"results in build/doctest/output.txt"
+
+pydoc-topics: BUILDER = pydoc-topics
+pydoc-topics: build
+	@echo "Building finished; now copy build/pydoc-topics/pydoc_topics.py " \
+	"into the Lib/ directory"
+
+coverage: BUILDER = coverage
+coverage: build
+
+htmlview: html
+	$(PYTHON) -c "import webbrowser; from pathlib import Path; \
+	webbrowser.open('file://' + Path('build/html/index.html').resolve())"
+
+clean:
+	-rm -rf build/*
+
+watch: htmlview
+	watchmedo shell-command -p '*.rst' -c 'make html' -R -D
diff --git a/scrapy-master/docs/README.rst b/scrapy-master/docs/README.rst
new file mode 100644
index 0000000..36dd5ae
--- /dev/null
+++ b/scrapy-master/docs/README.rst
@@ -0,0 +1,68 @@
+:orphan:
+
+======================================
+Scrapy documentation quick start guide
+======================================
+
+This file provides a quick guide on how to compile the Scrapy documentation.
+
+
+Setup the environment
+---------------------
+
+To compile the documentation you need Sphinx Python library. To install it
+and all its dependencies run the following command from this dir
+
+::
+
+    pip install -r requirements.txt
+
+
+Compile the documentation
+-------------------------
+
+To compile the documentation (to classic HTML output) run the following command
+from this dir::
+
+    make html
+
+Documentation will be generated (in HTML format) inside the ``build/html`` dir.
+
+
+View the documentation
+----------------------
+
+To view the documentation run the following command::
+
+    make htmlview
+
+This command will fire up your default browser and open the main page of your
+(previously generated) HTML documentation.
+
+
+Start over
+----------
+
+To clean up all generated documentation files and start from scratch run::
+
+    make clean
+
+Keep in mind that this command won't touch any documentation source files.
+
+
+Recreating documentation on the fly
+-----------------------------------
+
+There is a way to recreate the doc automatically when you make changes, you
+need to install watchdog (``pip install watchdog``) and then use::
+
+    make watch
+
+Alternative method using tox
+----------------------------
+
+To compile the documentation to HTML run the following command::
+
+    tox -e docs
+
+Documentation will be generated (in HTML format) inside the ``.tox/docs/tmp/html`` dir.
diff --git a/scrapy-master/docs/_ext/scrapydocs.py b/scrapy-master/docs/_ext/scrapydocs.py
new file mode 100644
index 0000000..c23a890
--- /dev/null
+++ b/scrapy-master/docs/_ext/scrapydocs.py
@@ -0,0 +1,148 @@
+from operator import itemgetter
+
+from docutils import nodes
+from docutils.parsers.rst import Directive
+from docutils.parsers.rst.roles import set_classes
+from sphinx.util.nodes import make_refnode
+
+
+class settingslist_node(nodes.General, nodes.Element):
+    pass
+
+
+class SettingsListDirective(Directive):
+    def run(self):
+        return [settingslist_node("")]
+
+
+def is_setting_index(node):
+    if node.tagname == "index" and node["entries"]:
+        # index entries for setting directives look like:
+        # [('pair', 'SETTING_NAME; setting', 'std:setting-SETTING_NAME', '')]
+        entry_type, info, refid = node["entries"][0][:3]
+        return entry_type == "pair" and info.endswith("; setting")
+    return False
+
+
+def get_setting_target(node):
+    # target nodes are placed next to the node in the doc tree
+    return node.parent[node.parent.index(node) + 1]
+
+
+def get_setting_name_and_refid(node):
+    """Extract setting name from directive index node"""
+    entry_type, info, refid = node["entries"][0][:3]
+    return info.replace("; setting", ""), refid
+
+
+def collect_scrapy_settings_refs(app, doctree):
+    env = app.builder.env
+
+    if not hasattr(env, "scrapy_all_settings"):
+        env.scrapy_all_settings = []
+
+    for node in doctree.traverse(is_setting_index):
+        targetnode = get_setting_target(node)
+        assert isinstance(targetnode, nodes.target), "Next node is not a target"
+
+        setting_name, refid = get_setting_name_and_refid(node)
+
+        env.scrapy_all_settings.append(
+            {
+                "docname": env.docname,
+                "setting_name": setting_name,
+                "refid": refid,
+            }
+        )
+
+
+def make_setting_element(setting_data, app, fromdocname):
+    refnode = make_refnode(
+        app.builder,
+        fromdocname,
+        todocname=setting_data["docname"],
+        targetid=setting_data["refid"],
+        child=nodes.Text(setting_data["setting_name"]),
+    )
+    p = nodes.paragraph()
+    p += refnode
+
+    item = nodes.list_item()
+    item += p
+    return item
+
+
+def replace_settingslist_nodes(app, doctree, fromdocname):
+    env = app.builder.env
+
+    for node in doctree.traverse(settingslist_node):
+        settings_list = nodes.bullet_list()
+        settings_list.extend(
+            [
+                make_setting_element(d, app, fromdocname)
+                for d in sorted(env.scrapy_all_settings, key=itemgetter("setting_name"))
+                if fromdocname != d["docname"]
+            ]
+        )
+        node.replace_self(settings_list)
+
+
+def setup(app):
+    app.add_crossref_type(
+        directivename="setting",
+        rolename="setting",
+        indextemplate="pair: %s; setting",
+    )
+    app.add_crossref_type(
+        directivename="signal",
+        rolename="signal",
+        indextemplate="pair: %s; signal",
+    )
+    app.add_crossref_type(
+        directivename="command",
+        rolename="command",
+        indextemplate="pair: %s; command",
+    )
+    app.add_crossref_type(
+        directivename="reqmeta",
+        rolename="reqmeta",
+        indextemplate="pair: %s; reqmeta",
+    )
+    app.add_role("source", source_role)
+    app.add_role("commit", commit_role)
+    app.add_role("issue", issue_role)
+    app.add_role("rev", rev_role)
+
+    app.add_node(settingslist_node)
+    app.add_directive("settingslist", SettingsListDirective)
+
+    app.connect("doctree-read", collect_scrapy_settings_refs)
+    app.connect("doctree-resolved", replace_settingslist_nodes)
+
+
+def source_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
+    ref = "https://github.com/scrapy/scrapy/blob/master/" + text
+    set_classes(options)
+    node = nodes.reference(rawtext, text, refuri=ref, **options)
refuri=ref, **options) + return [node], [] + + +def issue_role(name, rawtext, text, lineno, inliner, options={}, content=[]): + ref = "https://github.com/scrapy/scrapy/issues/" + text + set_classes(options) + node = nodes.reference(rawtext, "issue " + text, refuri=ref, **options) + return [node], [] + + +def commit_role(name, rawtext, text, lineno, inliner, options={}, content=[]): + ref = "https://github.com/scrapy/scrapy/commit/" + text + set_classes(options) + node = nodes.reference(rawtext, "commit " + text, refuri=ref, **options) + return [node], [] + + +def rev_role(name, rawtext, text, lineno, inliner, options={}, content=[]): + ref = "http://hg.scrapy.org/scrapy/changeset/" + text + set_classes(options) + node = nodes.reference(rawtext, "r" + text, refuri=ref, **options) + return [node], [] diff --git a/scrapy-master/docs/_static/custom.css b/scrapy-master/docs/_static/custom.css new file mode 100644 index 0000000..64f1693 --- /dev/null +++ b/scrapy-master/docs/_static/custom.css @@ -0,0 +1,10 @@ +/* Move lists closer to their introducing paragraph */ +.rst-content .section ol p, .rst-content .section ul p { + margin-bottom: 0px; +} +.rst-content p + ol, .rst-content p + ul { + margin-top: -18px; /* Compensates margin-top: 24px of p */ +} +.rst-content dl p + ol, .rst-content dl p + ul { + margin-top: -6px; /* Compensates margin-top: 12px of p */ +} \ No newline at end of file diff --git a/scrapy-master/docs/_static/selectors-sample1.html b/scrapy-master/docs/_static/selectors-sample1.html new file mode 100644 index 0000000..9157188 --- /dev/null +++ b/scrapy-master/docs/_static/selectors-sample1.html @@ -0,0 +1,17 @@ + + + + + + Example website + + +

+ + \ No newline at end of file diff --git a/scrapy-master/docs/_templates/layout.html b/scrapy-master/docs/_templates/layout.html new file mode 100644 index 0000000..18a5231 --- /dev/null +++ b/scrapy-master/docs/_templates/layout.html @@ -0,0 +1,11 @@ +{% extends "!layout.html" %} + +{% block footer %} +{{ super() }} + +{% endblock %} diff --git a/scrapy-master/docs/_tests/quotes.html b/scrapy-master/docs/_tests/quotes.html new file mode 100644 index 0000000..71aff88 --- /dev/null +++ b/scrapy-master/docs/_tests/quotes.html @@ -0,0 +1,281 @@ + + + + + Quotes to Scrape + + + + +
+[quotes.html fixture body: the "Quotes to Scrape" sample page, with a Login link, ten quotes (each with its quote text, a "by ... (about)" author byline, and tag links such as change, deep-thoughts, inspirational, humor), and a "Top Ten tags" box listing love, inspirational, life, humor, books, reading, friendship, friends, truth and simile; the surrounding HTML markup is omitted.]
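For context on how this fixture is consumed: the documentation doctests build an :class:`~scrapy.http.HtmlResponse` from it through the ``load_response()`` helper defined in ``docs/conftest.py`` later in this patch. A minimal sketch of the same idea; the URL and the CSS selectors are illustrative only and assume the usual quotes.toscrape.com markup:

.. code-block:: python

    from pathlib import Path

    from scrapy.http import HtmlResponse

    # Build a fake response from the saved fixture, much like the
    # load_response() helper in docs/conftest.py does (the path assumes the
    # script is run from the repository root).
    body = (Path("docs") / "_tests" / "quotes.html").read_bytes()
    response = HtmlResponse("https://quotes.toscrape.com/", body=body)

    # Illustrative queries; class names follow the quotes.toscrape.com markup.
    print(response.css("title::text").get())
    print(response.css("small.author::text").getall()[:3])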
+ + + \ No newline at end of file diff --git a/scrapy-master/docs/_tests/quotes1.html b/scrapy-master/docs/_tests/quotes1.html new file mode 100644 index 0000000..71aff88 --- /dev/null +++ b/scrapy-master/docs/_tests/quotes1.html @@ -0,0 +1,281 @@ + + + + + Quotes to Scrape + + + + +
+[quotes1.html fixture body: identical in content to quotes.html above (the same "Quotes to Scrape" page with ten quotes, their tags, and the "Top Ten tags" box); the surrounding HTML markup is omitted.]
+ + + \ No newline at end of file diff --git a/scrapy-master/docs/conf.py b/scrapy-master/docs/conf.py new file mode 100644 index 0000000..38ca819 --- /dev/null +++ b/scrapy-master/docs/conf.py @@ -0,0 +1,321 @@ +# Scrapy documentation build configuration file, created by +# sphinx-quickstart on Mon Nov 24 12:02:52 2008. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# The contents of this file are pickled, so don't put values in the namespace +# that aren't pickleable (module imports are okay, they're removed automatically). +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +from datetime import datetime +from pathlib import Path + +# If your extensions are in another directory, add it here. If the directory +# is relative to the documentation root, use Path.absolute to make it absolute. +sys.path.append(str(Path(__file__).parent / "_ext")) +sys.path.insert(0, str(Path(__file__).parent.parent)) + + +# General configuration +# --------------------- + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [ + "hoverxref.extension", + "notfound.extension", + "scrapydocs", + "sphinx.ext.autodoc", + "sphinx.ext.coverage", + "sphinx.ext.intersphinx", + "sphinx.ext.viewcode", +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# The suffix of source filenames. +source_suffix = ".rst" + +# The encoding of source files. +# source_encoding = 'utf-8' + +# The master toctree document. +master_doc = "index" + +# General information about the project. +project = "Scrapy" +copyright = f"2008–{datetime.now().year}, Scrapy developers" + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +try: + import scrapy + + version = ".".join(map(str, scrapy.version_info[:2])) + release = scrapy.__version__ +except ImportError: + version = "" + release = "" + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +language = "en" + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of documents that shouldn't be included in the build. +# unused_docs = [] + +exclude_patterns = ["build"] + +# List of directories, relative to source directory, that shouldn't be searched +# for source files. +exclude_trees = [".build"] + +# The reST default role (used for this markup: `text`) to use for all documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. 
+pygments_style = "sphinx" + +# List of Sphinx warnings that will not be raised +suppress_warnings = ["epub.unknown_project_files"] + + +# Options for HTML output +# ----------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "sphinx_rtd_theme" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +# Add path to the RTD explicitly to robustify builds (otherwise might +# fail in a clean Debian build env) +import sphinx_rtd_theme + +html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + +# The style sheet to use for HTML and HTML Help pages. A file of that name +# must exist either in Sphinx' static/ path, or in one of the custom paths +# given in html_static_path. +# html_style = 'scrapydoc.css' + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +html_last_updated_fmt = "%b %d, %Y" + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_use_modindex = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, the reST sources are included in the HTML build as _sources/. +html_copy_source = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = '' + +# Output file base name for HTML help builder. +htmlhelp_basename = "Scrapydoc" + +html_css_files = [ + "custom.css", +] + + +# Options for LaTeX output +# ------------------------ + +# The paper size ('letter' or 'a4'). +# latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +# latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, document class [howto/manual]). +latex_documents = [ + ("index", "Scrapy.tex", "Scrapy Documentation", "Scrapy developers", "manual"), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. 
+# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# Additional stuff for the LaTeX preamble. +# latex_preamble = '' + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_use_modindex = True + + +# Options for the linkcheck builder +# --------------------------------- + +# A list of regular expressions that match URIs that should not be checked when +# doing a linkcheck build. +linkcheck_ignore = [ + "http://localhost:\d+", + "http://hg.scrapy.org", + "http://directory.google.com/", +] + + +# Options for the Coverage extension +# ---------------------------------- +coverage_ignore_pyobjects = [ + # Contract’s add_pre_hook and add_post_hook are not documented because + # they should be transparent to contract developers, for whom pre_hook and + # post_hook should be the actual concern. + r"\bContract\.add_(pre|post)_hook$", + # ContractsManager is an internal class, developers are not expected to + # interact with it directly in any way. + r"\bContractsManager\b$", + # For default contracts we only want to document their general purpose in + # their __init__ method, the methods they reimplement to achieve that purpose + # should be irrelevant to developers using those contracts. + r"\w+Contract\.(adjust_request_args|(pre|post)_process)$", + # Methods of downloader middlewares are not documented, only the classes + # themselves, since downloader middlewares are controlled through Scrapy + # settings. + r"^scrapy\.downloadermiddlewares\.\w*?\.(\w*?Middleware|DownloaderStats)\.", + # Base classes of downloader middlewares are implementation details that + # are not meant for users. + r"^scrapy\.downloadermiddlewares\.\w*?\.Base\w*?Middleware", + # Private exception used by the command-line interface implementation. + r"^scrapy\.exceptions\.UsageError", + # Methods of BaseItemExporter subclasses are only documented in + # BaseItemExporter. + r"^scrapy\.exporters\.(?!BaseItemExporter\b)\w*?\.", + # Extension behavior is only modified through settings. Methods of + # extension classes, as well as helper functions, are implementation + # details that are not documented. + r"^scrapy\.extensions\.[a-z]\w*?\.[A-Z]\w*?\.", # methods + r"^scrapy\.extensions\.[a-z]\w*?\.[a-z]", # helper functions + # Never documented before, and deprecated now. 
+ r"^scrapy\.linkextractors\.FilteringLinkExtractor$", + # Implementation detail of LxmlLinkExtractor + r"^scrapy\.linkextractors\.lxmlhtml\.LxmlParserLinkExtractor", +] + + +# Options for the InterSphinx extension +# ------------------------------------- + +intersphinx_mapping = { + "attrs": ("https://www.attrs.org/en/stable/", None), + "coverage": ("https://coverage.readthedocs.io/en/stable", None), + "cryptography": ("https://cryptography.io/en/latest/", None), + "cssselect": ("https://cssselect.readthedocs.io/en/latest", None), + "itemloaders": ("https://itemloaders.readthedocs.io/en/latest/", None), + "pytest": ("https://docs.pytest.org/en/latest", None), + "python": ("https://docs.python.org/3", None), + "sphinx": ("https://www.sphinx-doc.org/en/master", None), + "tox": ("https://tox.wiki/en/latest/", None), + "twisted": ("https://docs.twisted.org/en/stable/", None), + "twistedapi": ("https://docs.twisted.org/en/stable/api/", None), + "w3lib": ("https://w3lib.readthedocs.io/en/latest", None), +} +intersphinx_disabled_reftypes = [] + + +# Options for sphinx-hoverxref options +# ------------------------------------ + +hoverxref_auto_ref = True +hoverxref_role_types = { + "class": "tooltip", + "command": "tooltip", + "confval": "tooltip", + "hoverxref": "tooltip", + "mod": "tooltip", + "ref": "tooltip", + "reqmeta": "tooltip", + "setting": "tooltip", + "signal": "tooltip", +} +hoverxref_roles = ["command", "reqmeta", "setting", "signal"] + + +def setup(app): + app.connect("autodoc-skip-member", maybe_skip_member) + + +def maybe_skip_member(app, what, name, obj, skip, options): + if not skip: + # autodocs was generating a text "alias of" for the following members + # https://github.com/sphinx-doc/sphinx/issues/4422 + return name in {"default_item_class", "default_selector_class"} + return skip diff --git a/scrapy-master/docs/conftest.py b/scrapy-master/docs/conftest.py new file mode 100644 index 0000000..32f849a --- /dev/null +++ b/scrapy-master/docs/conftest.py @@ -0,0 +1,34 @@ +from doctest import ELLIPSIS, NORMALIZE_WHITESPACE +from pathlib import Path + +from sybil import Sybil +from sybil.parsers.doctest import DocTestParser +from sybil.parsers.skip import skip + +try: + # >2.0.1 + from sybil.parsers.codeblock import PythonCodeBlockParser +except ImportError: + from sybil.parsers.codeblock import CodeBlockParser as PythonCodeBlockParser + +from scrapy.http.response.html import HtmlResponse + + +def load_response(url: str, filename: str) -> HtmlResponse: + input_path = Path(__file__).parent / "_tests" / filename + return HtmlResponse(url, body=input_path.read_bytes()) + + +def setup(namespace): + namespace["load_response"] = load_response + + +pytest_collect_file = Sybil( + parsers=[ + DocTestParser(optionflags=ELLIPSIS | NORMALIZE_WHITESPACE), + PythonCodeBlockParser(future_imports=["print_function"]), + skip, + ], + pattern="*.rst", + setup=setup, +).pytest() diff --git a/scrapy-master/docs/contributing.rst b/scrapy-master/docs/contributing.rst new file mode 100644 index 0000000..6b1a413 --- /dev/null +++ b/scrapy-master/docs/contributing.rst @@ -0,0 +1,316 @@ +.. _topics-contributing: + +====================== +Contributing to Scrapy +====================== + +.. important:: + + Double check that you are reading the most recent version of this document at + https://docs.scrapy.org/en/master/contributing.html + +There are many ways to contribute to Scrapy. Here are some of them: + +* Blog about Scrapy. Tell the world how you're using Scrapy. 
This will help + newcomers with more examples and will help the Scrapy project to increase its + visibility. + +* Report bugs and request features in the `issue tracker`_, trying to follow + the guidelines detailed in `Reporting bugs`_ below. + +* Submit patches for new functionalities and/or bug fixes. Please read + :ref:`writing-patches` and `Submitting patches`_ below for details on how to + write and submit a patch. + +* Join the `Scrapy subreddit`_ and share your ideas on how to + improve Scrapy. We're always open to suggestions. + +* Answer Scrapy questions at + `Stack Overflow `__. + + +Reporting bugs +============== + +.. note:: + + Please report security issues **only** to + scrapy-security@googlegroups.com. This is a private list only open to + trusted Scrapy developers, and its archives are not public. + +Well-written bug reports are very helpful, so keep in mind the following +guidelines when you're going to report a new bug. + +* check the :ref:`FAQ ` first to see if your issue is addressed in a + well-known question + +* if you have a general question about Scrapy usage, please ask it at + `Stack Overflow `__ + (use "scrapy" tag). + +* check the `open issues`_ to see if the issue has already been reported. If it + has, don't dismiss the report, but check the ticket history and comments. If + you have additional useful information, please leave a comment, or consider + :ref:`sending a pull request ` with a fix. + +* search the `scrapy-users`_ list and `Scrapy subreddit`_ to see if it has + been discussed there, or if you're not sure if what you're seeing is a bug. + You can also ask in the ``#scrapy`` IRC channel. + +* write **complete, reproducible, specific bug reports**. The smaller the test + case, the better. Remember that other developers won't have your project to + reproduce the bug, so please include all relevant files required to reproduce + it. See for example StackOverflow's guide on creating a + `Minimal, Complete, and Verifiable example`_ exhibiting the issue. + +* the most awesome way to provide a complete reproducible example is to + send a pull request which adds a failing test case to the + Scrapy testing suite (see :ref:`submitting-patches`). + This is helpful even if you don't have an intention to + fix the issue yourselves. + +* include the output of ``scrapy version -v`` so developers working on your bug + know exactly which version and platform it occurred on, which is often very + helpful for reproducing it, or knowing if it was already fixed. + +.. _Minimal, Complete, and Verifiable example: https://stackoverflow.com/help/mcve + +.. _writing-patches: + +Writing patches +=============== + +The better a patch is written, the higher the chances that it'll get accepted and the sooner it will be merged. + +Well-written patches should: + +* contain the minimum amount of code required for the specific change. Small + patches are easier to review and merge. So, if you're doing more than one + change (or bug fix), please consider submitting one patch per change. Do not + collapse multiple changes into a single patch. For big changes consider using + a patch queue. + +* pass all unit-tests. See `Running tests`_ below. + +* include one (or more) test cases that check the bug fixed or the new + functionality added. See `Writing tests`_ below. + +* if you're adding or changing a public (documented) API, please include + the documentation changes in the same patch. See `Documentation policies`_ + below. 
+ +* if you're adding a private API, please add a regular expression to the + ``coverage_ignore_pyobjects`` variable of ``docs/conf.py`` to exclude the new + private API from documentation coverage checks. + + To see if your private API is skipped properly, generate a documentation + coverage report as follows:: + + tox -e docs-coverage + +* if you are removing deprecated code, first make sure that at least 1 year + (12 months) has passed since the release that introduced the deprecation. + See :ref:`deprecation-policy`. + + +.. _submitting-patches: + +Submitting patches +================== + +The best way to submit a patch is to issue a `pull request`_ on GitHub, +optionally creating a new issue first. + +Remember to explain what was fixed or the new functionality (what it is, why +it's needed, etc). The more info you include, the easier will be for core +developers to understand and accept your patch. + +You can also discuss the new functionality (or bug fix) before creating the +patch, but it's always good to have a patch ready to illustrate your arguments +and show that you have put some additional thought into the subject. A good +starting point is to send a pull request on GitHub. It can be simple enough to +illustrate your idea, and leave documentation/tests for later, after the idea +has been validated and proven useful. Alternatively, you can start a +conversation in the `Scrapy subreddit`_ to discuss your idea first. + +Sometimes there is an existing pull request for the problem you'd like to +solve, which is stalled for some reason. Often the pull request is in a +right direction, but changes are requested by Scrapy maintainers, and the +original pull request author hasn't had time to address them. +In this case consider picking up this pull request: open +a new pull request with all commits from the original pull request, as well as +additional changes to address the raised issues. Doing so helps a lot; it is +not considered rude as long as the original author is acknowledged by keeping +his/her commits. + +You can pull an existing pull request to a local branch +by running ``git fetch upstream pull/$PR_NUMBER/head:$BRANCH_NAME_TO_CREATE`` +(replace 'upstream' with a remote name for scrapy repository, +``$PR_NUMBER`` with an ID of the pull request, and ``$BRANCH_NAME_TO_CREATE`` +with a name of the branch you want to create locally). +See also: https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/checking-out-pull-requests-locally#modifying-an-inactive-pull-request-locally. + +When writing GitHub pull requests, try to keep titles short but descriptive. +E.g. For bug #411: "Scrapy hangs if an exception raises in start_requests" +prefer "Fix hanging when exception occurs in start_requests (#411)" +instead of "Fix for #411". Complete titles make it easy to skim through +the issue tracker. + +Finally, try to keep aesthetic changes (:pep:`8` compliance, unused imports +removal, etc) in separate commits from functional changes. This will make pull +requests easier to review and more likely to get merged. + + +.. _coding-style: + +Coding style +============ + +Please follow these coding conventions when writing code for inclusion in +Scrapy: + +* We use `black `_ for code formatting. + There is a hook in the pre-commit config + that will automatically format your code before every commit. You can also + run black manually with ``tox -e black``. + +* Don't put your name in the code you contribute; git provides enough + metadata to identify author of the code. 
+ See https://help.github.com/en/github/using-git/setting-your-username-in-git for + setup instructions. + +.. _scrapy-pre-commit: + +Pre-commit +========== + +We use `pre-commit`_ to automatically address simple code issues before every +commit. + +.. _pre-commit: https://pre-commit.com/ + +After your create a local clone of your fork of the Scrapy repository: + +#. `Install pre-commit `_. + +#. On the root of your local clone of the Scrapy repository, run the following + command: + + .. code-block:: bash + + pre-commit install + +Now pre-commit will check your changes every time you create a Git commit. Upon +finding issues, pre-commit aborts your commit, and either fixes those issues +automatically, or only reports them to you. If it fixes those issues +automatically, creating your commit again should succeed. Otherwise, you may +need to address the corresponding issues manually first. + +.. _documentation-policies: + +Documentation policies +====================== + +For reference documentation of API members (classes, methods, etc.) use +docstrings and make sure that the Sphinx documentation uses the +:mod:`~sphinx.ext.autodoc` extension to pull the docstrings. API reference +documentation should follow docstring conventions (`PEP 257`_) and be +IDE-friendly: short, to the point, and it may provide short examples. + +Other types of documentation, such as tutorials or topics, should be covered in +files within the ``docs/`` directory. This includes documentation that is +specific to an API member, but goes beyond API reference documentation. + +In any case, if something is covered in a docstring, use the +:mod:`~sphinx.ext.autodoc` extension to pull the docstring into the +documentation instead of duplicating the docstring in files within the +``docs/`` directory. + +Documentation updates that cover new or modified features must use Sphinx’s +:rst:dir:`versionadded` and :rst:dir:`versionchanged` directives. Use +``VERSION`` as version, we will replace it with the actual version right before +the corresponding release. When we release a new major or minor version of +Scrapy, we remove these directives if they are older than 3 years. + +Documentation about deprecated features must be removed as those features are +deprecated, so that new readers do not run into it. New deprecations and +deprecation removals are documented in the :ref:`release notes `. + + +Tests +===== + +Tests are implemented using the :doc:`Twisted unit-testing framework +`. Running tests requires +:doc:`tox `. + +.. _running-tests: + +Running tests +------------- + +To run all tests:: + + tox + +To run a specific test (say ``tests/test_loader.py``) use: + + ``tox -- tests/test_loader.py`` + +To run the tests on a specific :doc:`tox ` environment, use +``-e `` with an environment name from ``tox.ini``. For example, to run +the tests with Python 3.7 use:: + + tox -e py37 + +You can also specify a comma-separated list of environments, and use :ref:`tox’s +parallel mode ` to run the tests on multiple environments in +parallel:: + + tox -e py37,py38 -p auto + +To pass command-line options to :doc:`pytest `, add them after +``--`` in your call to :doc:`tox `. Using ``--`` overrides the +default positional arguments defined in ``tox.ini``, so you must include those +default positional arguments (``scrapy tests``) after ``--`` as well:: + + tox -- scrapy tests -x # stop after first failure + +You can also use the `pytest-xdist`_ plugin. 
For example, to run all tests on +the Python 3.7 :doc:`tox ` environment using all your CPU cores:: + + tox -e py37 -- scrapy tests -n auto + +To see coverage report install :doc:`coverage ` +(``pip install coverage``) and run: + + ``coverage report`` + +see output of ``coverage --help`` for more options like html or xml report. + +Writing tests +------------- + +All functionality (including new features and bug fixes) must include a test +case to check that it works as expected, so please include tests for your +patches if you want them to get accepted sooner. + +Scrapy uses unit-tests, which are located in the `tests/`_ directory. +Their module name typically resembles the full path of the module they're +testing. For example, the item loaders code is in:: + + scrapy.loader + +And their unit-tests are in:: + + tests/test_loader.py + +.. _issue tracker: https://github.com/scrapy/scrapy/issues +.. _scrapy-users: https://groups.google.com/forum/#!forum/scrapy-users +.. _Scrapy subreddit: https://reddit.com/r/scrapy +.. _AUTHORS: https://github.com/scrapy/scrapy/blob/master/AUTHORS +.. _tests/: https://github.com/scrapy/scrapy/tree/master/tests +.. _open issues: https://github.com/scrapy/scrapy/issues +.. _PEP 257: https://www.python.org/dev/peps/pep-0257/ +.. _pull request: https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request +.. _pytest-xdist: https://github.com/pytest-dev/pytest-xdist diff --git a/scrapy-master/docs/faq.rst b/scrapy-master/docs/faq.rst new file mode 100644 index 0000000..031f4b9 --- /dev/null +++ b/scrapy-master/docs/faq.rst @@ -0,0 +1,423 @@ +.. _faq: + +Frequently Asked Questions +========================== + +.. _faq-scrapy-bs-cmp: + +How does Scrapy compare to BeautifulSoup or lxml? +------------------------------------------------- + +`BeautifulSoup`_ and `lxml`_ are libraries for parsing HTML and XML. Scrapy is +an application framework for writing web spiders that crawl web sites and +extract data from them. + +Scrapy provides a built-in mechanism for extracting data (called +:ref:`selectors `) but you can easily use `BeautifulSoup`_ +(or `lxml`_) instead, if you feel more comfortable working with them. After +all, they're just parsing libraries which can be imported and used from any +Python code. + +In other words, comparing `BeautifulSoup`_ (or `lxml`_) to Scrapy is like +comparing `jinja2`_ to `Django`_. + +.. _BeautifulSoup: https://www.crummy.com/software/BeautifulSoup/ +.. _lxml: https://lxml.de/ +.. _jinja2: https://palletsprojects.com/p/jinja/ +.. _Django: https://www.djangoproject.com/ + +Can I use Scrapy with BeautifulSoup? +------------------------------------ + +Yes, you can. +As mentioned :ref:`above `, `BeautifulSoup`_ can be used +for parsing HTML responses in Scrapy callbacks. +You just have to feed the response's body into a ``BeautifulSoup`` object +and extract whatever data you need from it. + +Here's an example spider using BeautifulSoup API, with ``lxml`` as the HTML parser: + +.. skip: next +.. code-block:: python + + from bs4 import BeautifulSoup + import scrapy + + + class ExampleSpider(scrapy.Spider): + name = "example" + allowed_domains = ["example.com"] + start_urls = ("http://www.example.com/",) + + def parse(self, response): + # use lxml to get decent HTML parsing speed + soup = BeautifulSoup(response.text, "lxml") + yield {"url": response.url, "title": soup.h1.string} + +.. note:: + + ``BeautifulSoup`` supports several HTML/XML parsers. 
+ See `BeautifulSoup's official documentation`_ on which ones are available. + +.. _BeautifulSoup's official documentation: https://www.crummy.com/software/BeautifulSoup/bs4/doc/#specifying-the-parser-to-use + + +Did Scrapy "steal" X from Django? +--------------------------------- + +Probably, but we don't like that word. We think Django_ is a great open source +project and an example to follow, so we've used it as an inspiration for +Scrapy. + +We believe that, if something is already done well, there's no need to reinvent +it. This concept, besides being one of the foundations for open source and free +software, not only applies to software but also to documentation, procedures, +policies, etc. So, instead of going through each problem ourselves, we choose +to copy ideas from those projects that have already solved them properly, and +focus on the real problems we need to solve. + +We'd be proud if Scrapy serves as an inspiration for other projects. Feel free +to steal from us! + +Does Scrapy work with HTTP proxies? +----------------------------------- + +Yes. Support for HTTP proxies is provided (since Scrapy 0.8) through the HTTP +Proxy downloader middleware. See +:class:`~scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware`. + +How can I scrape an item with attributes in different pages? +------------------------------------------------------------ + +See :ref:`topics-request-response-ref-request-callback-arguments`. + +How can I simulate a user login in my spider? +--------------------------------------------- + +See :ref:`topics-request-response-ref-request-userlogin`. + +.. _faq-bfo-dfo: + +Does Scrapy crawl in breadth-first or depth-first order? +-------------------------------------------------------- + +By default, Scrapy uses a `LIFO`_ queue for storing pending requests, which +basically means that it crawls in `DFO order`_. This order is more convenient +in most cases. + +If you do want to crawl in true `BFO order`_, you can do it by +setting the following settings: + +.. code-block:: python + + DEPTH_PRIORITY = 1 + SCHEDULER_DISK_QUEUE = "scrapy.squeues.PickleFifoDiskQueue" + SCHEDULER_MEMORY_QUEUE = "scrapy.squeues.FifoMemoryQueue" + +While pending requests are below the configured values of +:setting:`CONCURRENT_REQUESTS`, :setting:`CONCURRENT_REQUESTS_PER_DOMAIN` or +:setting:`CONCURRENT_REQUESTS_PER_IP`, those requests are sent +concurrently. As a result, the first few requests of a crawl rarely follow the +desired order. Lowering those settings to ``1`` enforces the desired order, but +it significantly slows down the crawl as a whole. + + +My Scrapy crawler has memory leaks. What can I do? +-------------------------------------------------- + +See :ref:`topics-leaks`. + +Also, Python has a builtin memory leak issue which is described in +:ref:`topics-leaks-without-leaks`. + +How can I make Scrapy consume less memory? +------------------------------------------ + +See previous question. + +How can I prevent memory errors due to many allowed domains? +------------------------------------------------------------ + +If you have a spider with a long list of +:attr:`~scrapy.Spider.allowed_domains` (e.g. 50,000+), consider +replacing the default +:class:`~scrapy.spidermiddlewares.offsite.OffsiteMiddleware` spider middleware +with a :ref:`custom spider middleware ` that requires +less memory. 
For example: + +- If your domain names are similar enough, use your own regular expression + instead joining the strings in + :attr:`~scrapy.Spider.allowed_domains` into a complex regular + expression. + +- If you can `meet the installation requirements`_, use pyre2_ instead of + Python’s re_ to compile your URL-filtering regular expression. See + :issue:`1908`. + +See also other suggestions at `StackOverflow`_. + +.. note:: Remember to disable + :class:`scrapy.spidermiddlewares.offsite.OffsiteMiddleware` when you enable + your custom implementation: + + .. code-block:: python + + SPIDER_MIDDLEWARES = { + "scrapy.spidermiddlewares.offsite.OffsiteMiddleware": None, + "myproject.middlewares.CustomOffsiteMiddleware": 500, + } + +.. _meet the installation requirements: https://github.com/andreasvc/pyre2#installation +.. _pyre2: https://github.com/andreasvc/pyre2 +.. _re: https://docs.python.org/library/re.html +.. _StackOverflow: https://stackoverflow.com/q/36440681/939364 + +Can I use Basic HTTP Authentication in my spiders? +-------------------------------------------------- + +Yes, see :class:`~scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware`. + +Why does Scrapy download pages in English instead of my native language? +------------------------------------------------------------------------ + +Try changing the default `Accept-Language`_ request header by overriding the +:setting:`DEFAULT_REQUEST_HEADERS` setting. + +.. _Accept-Language: https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4 + +Where can I find some example Scrapy projects? +---------------------------------------------- + +See :ref:`intro-examples`. + +Can I run a spider without creating a project? +---------------------------------------------- + +Yes. You can use the :command:`runspider` command. For example, if you have a +spider written in a ``my_spider.py`` file you can run it with:: + + scrapy runspider my_spider.py + +See :command:`runspider` command for more info. + +I get "Filtered offsite request" messages. How can I fix them? +-------------------------------------------------------------- + +Those messages (logged with ``DEBUG`` level) don't necessarily mean there is a +problem, so you may not need to fix them. + +Those messages are thrown by the Offsite Spider Middleware, which is a spider +middleware (enabled by default) whose purpose is to filter out requests to +domains outside the ones covered by the spider. + +For more info see: +:class:`~scrapy.spidermiddlewares.offsite.OffsiteMiddleware`. + +What is the recommended way to deploy a Scrapy crawler in production? +--------------------------------------------------------------------- + +See :ref:`topics-deploy`. + +Can I use JSON for large exports? +--------------------------------- + +It'll depend on how large your output is. See :ref:`this warning +` in :class:`~scrapy.exporters.JsonItemExporter` +documentation. + +Can I return (Twisted) deferreds from signal handlers? +------------------------------------------------------ + +Some signals support returning deferreds from their handlers, others don't. See +the :ref:`topics-signals-ref` to know which ones. + +What does the response status code 999 means? +--------------------------------------------- + +999 is a custom response status code used by Yahoo sites to throttle requests. +Try slowing down the crawling speed by using a download delay of ``2`` (or +higher) in your spider: + +.. 
code-block:: python + + from scrapy.spiders import CrawlSpider + + + class MySpider(CrawlSpider): + name = "myspider" + + download_delay = 2 + + # [ ... rest of the spider code ... ] + +Or by setting a global download delay in your project with the +:setting:`DOWNLOAD_DELAY` setting. + +Can I call ``pdb.set_trace()`` from my spiders to debug them? +------------------------------------------------------------- + +Yes, but you can also use the Scrapy shell which allows you to quickly analyze +(and even modify) the response being processed by your spider, which is, quite +often, more useful than plain old ``pdb.set_trace()``. + +For more info see :ref:`topics-shell-inspect-response`. + +Simplest way to dump all my scraped items into a JSON/CSV/XML file? +------------------------------------------------------------------- + +To dump into a JSON file:: + + scrapy crawl myspider -O items.json + +To dump into a CSV file:: + + scrapy crawl myspider -O items.csv + +To dump into a XML file:: + + scrapy crawl myspider -O items.xml + +For more information see :ref:`topics-feed-exports` + +What's this huge cryptic ``__VIEWSTATE`` parameter used in some forms? +---------------------------------------------------------------------- + +The ``__VIEWSTATE`` parameter is used in sites built with ASP.NET/VB.NET. For +more info on how it works see `this page`_. Also, here's an `example spider`_ +which scrapes one of these sites. + +.. _this page: https://metacpan.org/pod/release/ECARROLL/HTML-TreeBuilderX-ASP_NET-0.09/lib/HTML/TreeBuilderX/ASP_NET.pm +.. _example spider: https://github.com/AmbientLighter/rpn-fas/blob/master/fas/spiders/rnp.py + +What's the best way to parse big XML/CSV data feeds? +---------------------------------------------------- + +Parsing big feeds with XPath selectors can be problematic since they need to +build the DOM of the entire feed in memory, and this can be quite slow and +consume a lot of memory. + +In order to avoid parsing all the entire feed at once in memory, you can use +the functions ``xmliter`` and ``csviter`` from ``scrapy.utils.iterators`` +module. In fact, this is what the feed spiders (see :ref:`topics-spiders`) use +under the cover. + +Does Scrapy manage cookies automatically? +----------------------------------------- + +Yes, Scrapy receives and keeps track of cookies sent by servers, and sends them +back on subsequent requests, like any regular web browser does. + +For more info see :ref:`topics-request-response` and :ref:`cookies-mw`. + +How can I see the cookies being sent and received from Scrapy? +-------------------------------------------------------------- + +Enable the :setting:`COOKIES_DEBUG` setting. + +How can I instruct a spider to stop itself? +------------------------------------------- + +Raise the :exc:`~scrapy.exceptions.CloseSpider` exception from a callback. For +more info see: :exc:`~scrapy.exceptions.CloseSpider`. + +How can I prevent my Scrapy bot from getting banned? +---------------------------------------------------- + +See :ref:`bans`. + +Should I use spider arguments or settings to configure my spider? +----------------------------------------------------------------- + +Both :ref:`spider arguments ` and :ref:`settings ` +can be used to configure your spider. 
There is no strict rule that mandates using one or the other, but settings
+are more suited for parameters that, once set, don't change much, while spider
+arguments are meant to change more often, even on each spider run and sometimes
+are required for the spider to run at all (for example, to set the start URL
+of a spider).
+
+To illustrate with an example, suppose you have a spider that needs to log
+into a site to scrape data, and you only want to scrape data from a certain
+section of the site (which varies each time). In that case, the credentials to
+log in would be settings, while the URL of the section to scrape would be a
+spider argument.
+
+I'm scraping an XML document and my XPath selector doesn't return any items
+----------------------------------------------------------------------------
+
+You may need to remove namespaces. See :ref:`removing-namespaces`.
+
+
+.. _faq-split-item:
+
+How to split an item into multiple items in an item pipeline?
+-------------------------------------------------------------
+
+:ref:`Item pipelines <topics-item-pipeline>` cannot yield multiple items per
+input item. :ref:`Create a spider middleware ` instead, and use its
+:meth:`~scrapy.spidermiddlewares.SpiderMiddleware.process_spider_output`
+method for this purpose. For example:
+
+.. code-block:: python
+
+    from copy import deepcopy
+
+    from itemadapter import is_item, ItemAdapter
+
+
+    class MultiplyItemsMiddleware:
+        def process_spider_output(self, response, result, spider):
+            for item in result:
+                if is_item(item):
+                    adapter = ItemAdapter(item)
+                    for _ in range(adapter["multiply_by"]):
+                        yield deepcopy(item)
+
+Does Scrapy support IPv6 addresses?
+-----------------------------------
+
+Yes, by setting :setting:`DNS_RESOLVER` to ``scrapy.resolver.CachingHostnameResolver``.
+Note that by doing so, you lose the ability to set a specific timeout for DNS requests
+(the value of the :setting:`DNS_TIMEOUT` setting is ignored).
+
+
+.. _faq-specific-reactor:
+
+How to deal with ``<class 'ValueError'>: filedescriptor out of range in select()`` exceptions?
+------------------------------------------------------------------------------------------------
+
+This issue `has been reported`_ to appear when running broad crawls on macOS, where the default
+Twisted reactor is :class:`twisted.internet.selectreactor.SelectReactor`. Switching to a
+different reactor is possible by using the :setting:`TWISTED_REACTOR` setting.
+
+
+.. _faq-stop-response-download:
+
+How can I cancel the download of a given response?
+--------------------------------------------------
+
+In some situations, it might be useful to stop the download of a certain response.
+For instance, sometimes you can determine whether or not you need the full contents
+of a response by inspecting its headers or the first bytes of its body. In that case,
+you could save resources by attaching a handler to the :class:`~scrapy.signals.bytes_received`
+or :class:`~scrapy.signals.headers_received` signals and raising a
+:exc:`~scrapy.exceptions.StopDownload` exception. Please refer to the
+:ref:`topics-stop-response-download` topic for additional information and examples.
+
+
+Running ``runspider`` I get ``error: No spider found in file: <filename>``
+-----------------------------------------------------------------------------
+
+This may happen if your Scrapy project has a spider module with a name that
+conflicts with the name of one of the `Python standard library modules`_, such
+as ``csv.py`` or ``os.py``, or any `Python package`_ that you have installed.
+See :issue:`2680`.
+
+
+..
_has been reported: https://github.com/scrapy/scrapy/issues/2905 +.. _Python standard library modules: https://docs.python.org/py-modindex.html +.. _Python package: https://pypi.org/ +.. _user agents: https://en.wikipedia.org/wiki/User_agent +.. _LIFO: https://en.wikipedia.org/wiki/Stack_(abstract_data_type) +.. _DFO order: https://en.wikipedia.org/wiki/Depth-first_search +.. _BFO order: https://en.wikipedia.org/wiki/Breadth-first_search diff --git a/scrapy-master/docs/index.rst b/scrapy-master/docs/index.rst new file mode 100644 index 0000000..5404969 --- /dev/null +++ b/scrapy-master/docs/index.rst @@ -0,0 +1,282 @@ +.. _topics-index: + +============================== +Scrapy |version| documentation +============================== + +Scrapy is a fast high-level `web crawling`_ and `web scraping`_ framework, used +to crawl websites and extract structured data from their pages. It can be used +for a wide range of purposes, from data mining to monitoring and automated +testing. + +.. _web crawling: https://en.wikipedia.org/wiki/Web_crawler +.. _web scraping: https://en.wikipedia.org/wiki/Web_scraping + +.. _getting-help: + +Getting help +============ + +Having trouble? We'd like to help! + +* Try the :doc:`FAQ ` -- it's got answers to some common questions. +* Looking for specific information? Try the :ref:`genindex` or :ref:`modindex`. +* Ask or search questions in `StackOverflow using the scrapy tag`_. +* Ask or search questions in the `Scrapy subreddit`_. +* Search for questions on the archives of the `scrapy-users mailing list`_. +* Ask a question in the `#scrapy IRC channel`_, +* Report bugs with Scrapy in our `issue tracker`_. +* Join the Discord community `Scrapy Discord`_. + +.. _scrapy-users mailing list: https://groups.google.com/forum/#!forum/scrapy-users +.. _Scrapy subreddit: https://www.reddit.com/r/scrapy/ +.. _StackOverflow using the scrapy tag: https://stackoverflow.com/tags/scrapy +.. _#scrapy IRC channel: irc://irc.freenode.net/scrapy +.. _issue tracker: https://github.com/scrapy/scrapy/issues +.. _Scrapy Discord: https://discord.gg/mv3yErfpvq + + +First steps +=========== + +.. toctree:: + :caption: First steps + :hidden: + + intro/overview + intro/install + intro/tutorial + intro/examples + +:doc:`intro/overview` + Understand what Scrapy is and how it can help you. + +:doc:`intro/install` + Get Scrapy installed on your computer. + +:doc:`intro/tutorial` + Write your first Scrapy project. + +:doc:`intro/examples` + Learn more by playing with a pre-made Scrapy project. + +.. _section-basics: + +Basic concepts +============== + +.. toctree:: + :caption: Basic concepts + :hidden: + + topics/commands + topics/spiders + topics/selectors + topics/items + topics/loaders + topics/shell + topics/item-pipeline + topics/feed-exports + topics/request-response + topics/link-extractors + topics/settings + topics/exceptions + +:doc:`topics/commands` + Learn about the command-line tool used to manage your Scrapy project. + +:doc:`topics/spiders` + Write the rules to crawl your websites. + +:doc:`topics/selectors` + Extract the data from web pages using XPath. + +:doc:`topics/shell` + Test your extraction code in an interactive environment. + +:doc:`topics/items` + Define the data you want to scrape. + +:doc:`topics/loaders` + Populate your items with the extracted data. + +:doc:`topics/item-pipeline` + Post-process and store your scraped data. + +:doc:`topics/feed-exports` + Output your scraped data using different formats and storages. 
+ +:doc:`topics/request-response` + Understand the classes used to represent HTTP requests and responses. + +:doc:`topics/link-extractors` + Convenient classes to extract links to follow from pages. + +:doc:`topics/settings` + Learn how to configure Scrapy and see all :ref:`available settings `. + +:doc:`topics/exceptions` + See all available exceptions and their meaning. + + +Built-in services +================= + +.. toctree:: + :caption: Built-in services + :hidden: + + topics/logging + topics/stats + topics/email + topics/telnetconsole + +:doc:`topics/logging` + Learn how to use Python's builtin logging on Scrapy. + +:doc:`topics/stats` + Collect statistics about your scraping crawler. + +:doc:`topics/email` + Send email notifications when certain events occur. + +:doc:`topics/telnetconsole` + Inspect a running crawler using a built-in Python console. + + +Solving specific problems +========================= + +.. toctree:: + :caption: Solving specific problems + :hidden: + + faq + topics/debug + topics/contracts + topics/practices + topics/broad-crawls + topics/developer-tools + topics/dynamic-content + topics/leaks + topics/media-pipeline + topics/deploy + topics/autothrottle + topics/benchmarking + topics/jobs + topics/coroutines + topics/asyncio + +:doc:`faq` + Get answers to most frequently asked questions. + +:doc:`topics/debug` + Learn how to debug common problems of your Scrapy spider. + +:doc:`topics/contracts` + Learn how to use contracts for testing your spiders. + +:doc:`topics/practices` + Get familiar with some Scrapy common practices. + +:doc:`topics/broad-crawls` + Tune Scrapy for crawling a lot domains in parallel. + +:doc:`topics/developer-tools` + Learn how to scrape with your browser's developer tools. + +:doc:`topics/dynamic-content` + Read webpage data that is loaded dynamically. + +:doc:`topics/leaks` + Learn how to find and get rid of memory leaks in your crawler. + +:doc:`topics/media-pipeline` + Download files and/or images associated with your scraped items. + +:doc:`topics/deploy` + Deploying your Scrapy spiders and run them in a remote server. + +:doc:`topics/autothrottle` + Adjust crawl rate dynamically based on load. + +:doc:`topics/benchmarking` + Check how Scrapy performs on your hardware. + +:doc:`topics/jobs` + Learn how to pause and resume crawls for large spiders. + +:doc:`topics/coroutines` + Use the :ref:`coroutine syntax `. + +:doc:`topics/asyncio` + Use :mod:`asyncio` and :mod:`asyncio`-powered libraries. + +.. _extending-scrapy: + +Extending Scrapy +================ + +.. toctree:: + :caption: Extending Scrapy + :hidden: + + topics/architecture + topics/downloader-middleware + topics/spider-middleware + topics/extensions + topics/signals + topics/scheduler + topics/exporters + topics/components + topics/api + + +:doc:`topics/architecture` + Understand the Scrapy architecture. + +:doc:`topics/downloader-middleware` + Customize how pages get requested and downloaded. + +:doc:`topics/spider-middleware` + Customize the input and output of your spiders. + +:doc:`topics/extensions` + Extend Scrapy with your custom functionality + +:doc:`topics/signals` + See all available signals and how to work with them. + +:doc:`topics/scheduler` + Understand the scheduler component. + +:doc:`topics/exporters` + Quickly export your scraped items to a file (XML, CSV, etc). + +:doc:`topics/components` + Learn the common API and some good practices when building custom Scrapy + components. 
+ +:doc:`topics/api` + Use it on extensions and middlewares to extend Scrapy functionality. + + +All the rest +============ + +.. toctree:: + :caption: All the rest + :hidden: + + news + contributing + versioning + +:doc:`news` + See what has changed in recent Scrapy versions. + +:doc:`contributing` + Learn how to contribute to the Scrapy project. + +:doc:`versioning` + Understand Scrapy versioning and API stability. diff --git a/scrapy-master/docs/intro/examples.rst b/scrapy-master/docs/intro/examples.rst new file mode 100644 index 0000000..edff894 --- /dev/null +++ b/scrapy-master/docs/intro/examples.rst @@ -0,0 +1,20 @@ +.. _intro-examples: + +======== +Examples +======== + +The best way to learn is with examples, and Scrapy is no exception. For this +reason, there is an example Scrapy project named quotesbot_, that you can use to +play and learn more about Scrapy. It contains two spiders for +https://quotes.toscrape.com, one using CSS selectors and another one using XPath +expressions. + +The quotesbot_ project is available at: https://github.com/scrapy/quotesbot. +You can find more information about it in the project's README. + +If you're familiar with git, you can checkout the code. Otherwise you can +download the project as a zip file by clicking +`here `_. + +.. _quotesbot: https://github.com/scrapy/quotesbot diff --git a/scrapy-master/docs/intro/install.rst b/scrapy-master/docs/intro/install.rst new file mode 100644 index 0000000..2c2079f --- /dev/null +++ b/scrapy-master/docs/intro/install.rst @@ -0,0 +1,285 @@ +.. _intro-install: + +================== +Installation guide +================== + +.. _faq-python-versions: + +Supported Python versions +========================= + +Scrapy requires Python 3.7+, either the CPython implementation (default) or +the PyPy implementation (see :ref:`python:implementations`). + +.. _intro-install-scrapy: + +Installing Scrapy +================= + +If you're using `Anaconda`_ or `Miniconda`_, you can install the package from +the `conda-forge`_ channel, which has up-to-date packages for Linux, Windows +and macOS. + +To install Scrapy using ``conda``, run:: + + conda install -c conda-forge scrapy + +Alternatively, if you’re already familiar with installation of Python packages, +you can install Scrapy and its dependencies from PyPI with:: + + pip install Scrapy + +We strongly recommend that you install Scrapy in :ref:`a dedicated virtualenv `, +to avoid conflicting with your system packages. + +Note that sometimes this may require solving compilation issues for some Scrapy +dependencies depending on your operating system, so be sure to check the +:ref:`intro-install-platform-notes`. + +For more detailed and platform specifics instructions, as well as +troubleshooting information, read on. + + +Things that are good to know +---------------------------- + +Scrapy is written in pure Python and depends on a few key Python packages (among others): + +* `lxml`_, an efficient XML and HTML parser +* `parsel`_, an HTML/XML data extraction library written on top of lxml, +* `w3lib`_, a multi-purpose helper for dealing with URLs and web page encodings +* `twisted`_, an asynchronous networking framework +* `cryptography`_ and `pyOpenSSL`_, to deal with various network-level security needs + +Some of these packages themselves depend on non-Python packages +that might require additional installation steps depending on your platform. +Please check :ref:`platform-specific guides below `. 
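Before moving on to the platform notes, a quick way to confirm that these dependencies actually ended up in your (virtual) environment is to import them and print their versions. This is just a convenience sketch, and the ``scrapy version -v`` command reports much of the same information:

.. code-block:: python

    # Import the key dependencies listed above and report their versions.
    # Purely a sanity check; the module names are the standard import names.
    import OpenSSL  # provided by pyOpenSSL
    import cryptography
    import lxml.etree
    import parsel
    import twisted
    import w3lib

    import scrapy

    for label, module in [
        ("Scrapy", scrapy),
        ("lxml", lxml.etree),
        ("parsel", parsel),
        ("w3lib", w3lib),
        ("Twisted", twisted),
        ("cryptography", cryptography),
        ("pyOpenSSL", OpenSSL),
    ]:
        print(label, getattr(module, "__version__", "unknown"))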
+ +In case of any trouble related to these dependencies, +please refer to their respective installation instructions: + +* `lxml installation`_ +* :doc:`cryptography installation ` + +.. _lxml installation: https://lxml.de/installation.html + + +.. _intro-using-virtualenv: + +Using a virtual environment (recommended) +----------------------------------------- + +TL;DR: We recommend installing Scrapy inside a virtual environment +on all platforms. + +Python packages can be installed either globally (a.k.a system wide), +or in user-space. We do not recommend installing Scrapy system wide. + +Instead, we recommend that you install Scrapy within a so-called +"virtual environment" (:mod:`venv`). +Virtual environments allow you to not conflict with already-installed Python +system packages (which could break some of your system tools and scripts), +and still install packages normally with ``pip`` (without ``sudo`` and the likes). + +See :ref:`tut-venv` on how to create your virtual environment. + +Once you have created a virtual environment, you can install Scrapy inside it with ``pip``, +just like any other Python package. +(See :ref:`platform-specific guides ` +below for non-Python dependencies that you may need to install beforehand). + + +.. _intro-install-platform-notes: + +Platform specific installation notes +==================================== + +.. _intro-install-windows: + +Windows +------- + +Though it's possible to install Scrapy on Windows using pip, we recommend you +to install `Anaconda`_ or `Miniconda`_ and use the package from the +`conda-forge`_ channel, which will avoid most installation issues. + +Once you've installed `Anaconda`_ or `Miniconda`_, install Scrapy with:: + + conda install -c conda-forge scrapy + +To install Scrapy on Windows using ``pip``: + +.. warning:: + This installation method requires “Microsoft Visual C++” for installing some + Scrapy dependencies, which demands significantly more disk space than Anaconda. + +#. Download and execute `Microsoft C++ Build Tools`_ to install the Visual Studio Installer. + +#. Run the Visual Studio Installer. + +#. Under the Workloads section, select **C++ build tools**. + +#. Check the installation details and make sure following packages are selected as optional components: + + * **MSVC** (e.g MSVC v142 - VS 2019 C++ x64/x86 build tools (v14.23) ) + + * **Windows SDK** (e.g Windows 10 SDK (10.0.18362.0)) + +#. Install the Visual Studio Build Tools. + +Now, you should be able to :ref:`install Scrapy ` using ``pip``. + +.. _intro-install-ubuntu: + +Ubuntu 14.04 or above +--------------------- + +Scrapy is currently tested with recent-enough versions of lxml, +twisted and pyOpenSSL, and is compatible with recent Ubuntu distributions. +But it should support older versions of Ubuntu too, like Ubuntu 14.04, +albeit with potential issues with TLS connections. + +**Don't** use the ``python-scrapy`` package provided by Ubuntu, they are +typically too old and slow to catch up with latest Scrapy. + + +To install Scrapy on Ubuntu (or Ubuntu-based) systems, you need to install +these dependencies:: + + sudo apt-get install python3 python3-dev python3-pip libxml2-dev libxslt1-dev zlib1g-dev libffi-dev libssl-dev + +- ``python3-dev``, ``zlib1g-dev``, ``libxml2-dev`` and ``libxslt1-dev`` + are required for ``lxml`` +- ``libssl-dev`` and ``libffi-dev`` are required for ``cryptography`` + +Inside a :ref:`virtualenv `, +you can install Scrapy with ``pip`` after that:: + + pip install scrapy + +.. 
note:: + The same non-Python dependencies can be used to install Scrapy in Debian + Jessie (8.0) and above. + + +.. _intro-install-macos: + +macOS +----- + +Building Scrapy's dependencies requires the presence of a C compiler and +development headers. On macOS this is typically provided by Apple’s Xcode +development tools. To install the Xcode command line tools open a terminal +window and run:: + + xcode-select --install + +There's a `known issue `_ that +prevents ``pip`` from updating system packages. This has to be addressed to +successfully install Scrapy and its dependencies. Here are some proposed +solutions: + +* *(Recommended)* **Don't** use system Python. Install a new, updated version + that doesn't conflict with the rest of your system. Here's how to do it using + the `homebrew`_ package manager: + + * Install `homebrew`_ following the instructions in https://brew.sh/ + + * Update your ``PATH`` variable to state that homebrew packages should be + used before system packages (Change ``.bashrc`` to ``.zshrc`` accordingly + if you're using `zsh`_ as default shell):: + + echo "export PATH=/usr/local/bin:/usr/local/sbin:$PATH" >> ~/.bashrc + + * Reload ``.bashrc`` to ensure the changes have taken place:: + + source ~/.bashrc + + * Install python:: + + brew install python + + * Latest versions of python have ``pip`` bundled with them so you won't need + to install it separately. If this is not the case, upgrade python:: + + brew update; brew upgrade python + +* *(Optional)* :ref:`Install Scrapy inside a Python virtual environment + `. + + This method is a workaround for the above macOS issue, but it's an overall + good practice for managing dependencies and can complement the first method. + +After any of these workarounds you should be able to install Scrapy:: + + pip install Scrapy + + +PyPy +---- + +We recommend using the latest PyPy version. +For PyPy3, only Linux installation was tested. + +Most Scrapy dependencies now have binary wheels for CPython, but not for PyPy. +This means that these dependencies will be built during installation. +On macOS, you are likely to face an issue with building the Cryptography +dependency. The solution to this problem is described +`here `_, +that is to ``brew install openssl`` and then export the flags that this command +recommends (only needed when installing Scrapy). Installing on Linux has no special +issues besides installing build dependencies. +Installing Scrapy with PyPy on Windows is not tested. + +You can check that Scrapy is installed correctly by running ``scrapy bench``. +If this command gives errors such as +``TypeError: ... got 2 unexpected keyword arguments``, this means +that setuptools was unable to pick up one PyPy-specific dependency. +To fix this issue, run ``pip install 'PyPyDispatcher>=2.1.0'``. + + +.. 
_intro-install-troubleshooting: + +Troubleshooting +=============== + +AttributeError: 'module' object has no attribute 'OP_NO_TLSv1_1' +---------------------------------------------------------------- + +After you install or upgrade Scrapy, Twisted or pyOpenSSL, you may get an +exception with the following traceback:: + + […] + File "[…]/site-packages/twisted/protocols/tls.py", line 63, in + from twisted.internet._sslverify import _setAcceptableProtocols + File "[…]/site-packages/twisted/internet/_sslverify.py", line 38, in + TLSVersion.TLSv1_1: SSL.OP_NO_TLSv1_1, + AttributeError: 'module' object has no attribute 'OP_NO_TLSv1_1' + +The reason you get this exception is that your system or virtual environment +has a version of pyOpenSSL that your version of Twisted does not support. + +To install a version of pyOpenSSL that your version of Twisted supports, +reinstall Twisted with the :code:`tls` extra option:: + + pip install twisted[tls] + +For details, see `Issue #2473 `_. + +.. _Python: https://www.python.org/ +.. _pip: https://pip.pypa.io/en/latest/installing/ +.. _lxml: https://lxml.de/index.html +.. _parsel: https://pypi.org/project/parsel/ +.. _w3lib: https://pypi.org/project/w3lib/ +.. _twisted: https://twistedmatrix.com/trac/ +.. _cryptography: https://cryptography.io/en/latest/ +.. _pyOpenSSL: https://pypi.org/project/pyOpenSSL/ +.. _setuptools: https://pypi.python.org/pypi/setuptools +.. _homebrew: https://brew.sh/ +.. _zsh: https://www.zsh.org/ +.. _Anaconda: https://docs.anaconda.com/anaconda/ +.. _Miniconda: https://docs.conda.io/projects/conda/en/latest/user-guide/install/index.html +.. _Visual Studio: https://docs.microsoft.com/en-us/visualstudio/install/install-visual-studio +.. _Microsoft C++ Build Tools: https://visualstudio.microsoft.com/visual-cpp-build-tools/ +.. _conda-forge: https://conda-forge.org/ diff --git a/scrapy-master/docs/intro/overview.rst b/scrapy-master/docs/intro/overview.rst new file mode 100644 index 0000000..542760b --- /dev/null +++ b/scrapy-master/docs/intro/overview.rst @@ -0,0 +1,157 @@ +.. _intro-overview: + +================== +Scrapy at a glance +================== + +Scrapy (/ˈskreɪpaɪ/) is an application framework for crawling web sites and extracting +structured data which can be used for a wide range of useful applications, like +data mining, information processing or historical archival. + +Even though Scrapy was originally designed for `web scraping`_, it can also be +used to extract data using APIs (such as `Amazon Associates Web Services`_) or +as a general purpose web crawler. + + +Walk-through of an example spider +================================= + +In order to show you what Scrapy brings to the table, we'll walk you through an +example of a Scrapy Spider using the simplest way to run a spider. + +Here's the code for a spider that scrapes famous quotes from website +https://quotes.toscrape.com, following the pagination: + +.. 
code-block:: python + + import scrapy + + + class QuotesSpider(scrapy.Spider): + name = "quotes" + start_urls = [ + "https://quotes.toscrape.com/tag/humor/", + ] + + def parse(self, response): + for quote in response.css("div.quote"): + yield { + "author": quote.xpath("span/small/text()").get(), + "text": quote.css("span.text::text").get(), + } + + next_page = response.css('li.next a::attr("href")').get() + if next_page is not None: + yield response.follow(next_page, self.parse) + +Put this in a text file, name it to something like ``quotes_spider.py`` +and run the spider using the :command:`runspider` command:: + + scrapy runspider quotes_spider.py -o quotes.jsonl + +When this finishes you will have in the ``quotes.jsonl`` file a list of the +quotes in JSON Lines format, containing text and author, looking like this:: + + {"author": "Jane Austen", "text": "\u201cThe person, be it gentleman or lady, who has not pleasure in a good novel, must be intolerably stupid.\u201d"} + {"author": "Steve Martin", "text": "\u201cA day without sunshine is like, you know, night.\u201d"} + {"author": "Garrison Keillor", "text": "\u201cAnyone who thinks sitting in church can make you a Christian must also think that sitting in a garage can make you a car.\u201d"} + ... + + +What just happened? +------------------- + +When you ran the command ``scrapy runspider quotes_spider.py``, Scrapy looked for a +Spider definition inside it and ran it through its crawler engine. + +The crawl started by making requests to the URLs defined in the ``start_urls`` +attribute (in this case, only the URL for quotes in *humor* category) +and called the default callback method ``parse``, passing the response object as +an argument. In the ``parse`` callback, we loop through the quote elements +using a CSS Selector, yield a Python dict with the extracted quote text and author, +look for a link to the next page and schedule another request using the same +``parse`` method as callback. + +Here you notice one of the main advantages about Scrapy: requests are +:ref:`scheduled and processed asynchronously `. This +means that Scrapy doesn't need to wait for a request to be finished and +processed, it can send another request or do other things in the meantime. This +also means that other requests can keep going even if some request fails or an +error happens while handling it. + +While this enables you to do very fast crawls (sending multiple concurrent +requests at the same time, in a fault-tolerant way) Scrapy also gives you +control over the politeness of the crawl through :ref:`a few settings +`. You can do things like setting a download delay between +each request, limiting amount of concurrent requests per domain or per IP, and +even :ref:`using an auto-throttling extension ` that tries +to figure out these automatically. + +.. note:: + + This is using :ref:`feed exports ` to generate the + JSON file, you can easily change the export format (XML or CSV, for example) or the + storage backend (FTP or `Amazon S3`_, for example). You can also write an + :ref:`item pipeline ` to store the items in a database. + + +.. _topics-whatelse: + +What else? +========== + +You've seen how to extract and store items from a website using Scrapy, but +this is just the surface. 
Scrapy provides a lot of powerful features for making +scraping easy and efficient, such as: + +* Built-in support for :ref:`selecting and extracting ` data + from HTML/XML sources using extended CSS selectors and XPath expressions, + with helper methods to extract using regular expressions. + +* An :ref:`interactive shell console ` (IPython aware) for trying + out the CSS and XPath expressions to scrape data, very useful when writing or + debugging your spiders. + +* Built-in support for :ref:`generating feed exports ` in + multiple formats (JSON, CSV, XML) and storing them in multiple backends (FTP, + S3, local filesystem) + +* Robust encoding support and auto-detection, for dealing with foreign, + non-standard and broken encoding declarations. + +* :ref:`Strong extensibility support `, allowing you to plug + in your own functionality using :ref:`signals ` and a + well-defined API (middlewares, :ref:`extensions `, and + :ref:`pipelines `). + +* Wide range of built-in extensions and middlewares for handling: + + - cookies and session handling + - HTTP features like compression, authentication, caching + - user-agent spoofing + - robots.txt + - crawl depth restriction + - and more + +* A :ref:`Telnet console ` for hooking into a Python + console running inside your Scrapy process, to introspect and debug your + crawler + +* Plus other goodies like reusable spiders to crawl sites from `Sitemaps`_ and + XML/CSV feeds, a media pipeline for :ref:`automatically downloading images + ` (or any other media) associated with the scraped + items, a caching DNS resolver, and much more! + +What's next? +============ + +The next steps for you are to :ref:`install Scrapy `, +:ref:`follow through the tutorial ` to learn how to create +a full-blown Scrapy project and `join the community`_. Thanks for your +interest! + +.. _join the community: https://scrapy.org/community/ +.. _web scraping: https://en.wikipedia.org/wiki/Web_scraping +.. _Amazon Associates Web Services: https://affiliate-program.amazon.com/gp/advertising/api/detail/main.html +.. _Amazon S3: https://aws.amazon.com/s3/ +.. _Sitemaps: https://www.sitemaps.org/index.html diff --git a/scrapy-master/docs/intro/tutorial.rst b/scrapy-master/docs/intro/tutorial.rst new file mode 100644 index 0000000..064ce05 --- /dev/null +++ b/scrapy-master/docs/intro/tutorial.rst @@ -0,0 +1,827 @@ +.. _intro-tutorial: + +=============== +Scrapy Tutorial +=============== + +In this tutorial, we'll assume that Scrapy is already installed on your system. +If that's not the case, see :ref:`intro-install`. + +We are going to scrape `quotes.toscrape.com `_, a website +that lists quotes from famous authors. + +This tutorial will walk you through these tasks: + +1. Creating a new Scrapy project +2. Writing a :ref:`spider ` to crawl a site and extract data +3. Exporting the scraped data using the command line +4. Changing spider to recursively follow links +5. Using spider arguments + +Scrapy is written in Python_. If you're new to the language you might want to +start by getting an idea of what the language is like, to get the most out of +Scrapy. + +If you're already familiar with other languages, and want to learn Python quickly, the `Python Tutorial`_ is a good resource. 
+ +If you're new to programming and want to start with Python, the following books +may be useful to you: + +* `Automate the Boring Stuff With Python`_ + +* `How To Think Like a Computer Scientist`_ + +* `Learn Python 3 The Hard Way`_ + +You can also take a look at `this list of Python resources for non-programmers`_, +as well as the `suggested resources in the learnpython-subreddit`_. + +.. _Python: https://www.python.org/ +.. _this list of Python resources for non-programmers: https://wiki.python.org/moin/BeginnersGuide/NonProgrammers +.. _Python Tutorial: https://docs.python.org/3/tutorial +.. _Automate the Boring Stuff With Python: https://automatetheboringstuff.com/ +.. _How To Think Like a Computer Scientist: http://openbookproject.net/thinkcs/python/english3e/ +.. _Learn Python 3 The Hard Way: https://learnpythonthehardway.org/python3/ +.. _suggested resources in the learnpython-subreddit: https://www.reddit.com/r/learnpython/wiki/index#wiki_new_to_python.3F + + +Creating a project +================== + +Before you start scraping, you will have to set up a new Scrapy project. Enter a +directory where you'd like to store your code and run:: + + scrapy startproject tutorial + +This will create a ``tutorial`` directory with the following contents:: + + tutorial/ + scrapy.cfg # deploy configuration file + + tutorial/ # project's Python module, you'll import your code from here + __init__.py + + items.py # project items definition file + + middlewares.py # project middlewares file + + pipelines.py # project pipelines file + + settings.py # project settings file + + spiders/ # a directory where you'll later put your spiders + __init__.py + + +Our first Spider +================ + +Spiders are classes that you define and that Scrapy uses to scrape information +from a website (or a group of websites). They must subclass +:class:`~scrapy.Spider` and define the initial requests to make, +optionally how to follow links in the pages, and how to parse the downloaded +page content to extract data. + +This is the code for our first Spider. Save it in a file named +``quotes_spider.py`` under the ``tutorial/spiders`` directory in your project: + +.. code-block:: python + + from pathlib import Path + + import scrapy + + + class QuotesSpider(scrapy.Spider): + name = "quotes" + + def start_requests(self): + urls = [ + "https://quotes.toscrape.com/page/1/", + "https://quotes.toscrape.com/page/2/", + ] + for url in urls: + yield scrapy.Request(url=url, callback=self.parse) + + def parse(self, response): + page = response.url.split("/")[-2] + filename = f"quotes-{page}.html" + Path(filename).write_bytes(response.body) + self.log(f"Saved file {filename}") + + +As you can see, our Spider subclasses :class:`scrapy.Spider ` +and defines some attributes and methods: + +* :attr:`~scrapy.Spider.name`: identifies the Spider. It must be + unique within a project, that is, you can't set the same name for different + Spiders. + +* :meth:`~scrapy.Spider.start_requests`: must return an iterable of + Requests (you can return a list of requests or write a generator function) + which the Spider will begin to crawl from. Subsequent requests will be + generated successively from these initial requests. + +* :meth:`~scrapy.Spider.parse`: a method that will be called to handle + the response downloaded for each of the requests made. The response parameter + is an instance of :class:`~scrapy.http.TextResponse` that holds + the page content and has further helpful methods to handle it. 
+ + The :meth:`~scrapy.Spider.parse` method usually parses the response, extracting + the scraped data as dicts and also finding new URLs to + follow and creating new requests (:class:`~scrapy.Request`) from them. + +How to run our spider +--------------------- + +To put our spider to work, go to the project's top level directory and run:: + + scrapy crawl quotes + +This command runs the spider with name ``quotes`` that we've just added, that +will send some requests for the ``quotes.toscrape.com`` domain. You will get an output +similar to this:: + + ... (omitted for brevity) + 2016-12-16 21:24:05 [scrapy.core.engine] INFO: Spider opened + 2016-12-16 21:24:05 [scrapy.extensions.logstats] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min) + 2016-12-16 21:24:05 [scrapy.extensions.telnet] DEBUG: Telnet console listening on 127.0.0.1:6023 + 2016-12-16 21:24:05 [scrapy.core.engine] DEBUG: Crawled (404) (referer: None) + 2016-12-16 21:24:05 [scrapy.core.engine] DEBUG: Crawled (200) (referer: None) + 2016-12-16 21:24:05 [scrapy.core.engine] DEBUG: Crawled (200) (referer: None) + 2016-12-16 21:24:05 [quotes] DEBUG: Saved file quotes-1.html + 2016-12-16 21:24:05 [quotes] DEBUG: Saved file quotes-2.html + 2016-12-16 21:24:05 [scrapy.core.engine] INFO: Closing spider (finished) + ... + +Now, check the files in the current directory. You should notice that two new +files have been created: *quotes-1.html* and *quotes-2.html*, with the content +for the respective URLs, as our ``parse`` method instructs. + +.. note:: If you are wondering why we haven't parsed the HTML yet, hold + on, we will cover that soon. + + +What just happened under the hood? +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Scrapy schedules the :class:`scrapy.Request ` objects +returned by the ``start_requests`` method of the Spider. Upon receiving a +response for each one, it instantiates :class:`~scrapy.http.Response` objects +and calls the callback method associated with the request (in this case, the +``parse`` method) passing the response as argument. + + +A shortcut to the start_requests method +--------------------------------------- +Instead of implementing a :meth:`~scrapy.Spider.start_requests` method +that generates :class:`scrapy.Request ` objects from URLs, +you can just define a :attr:`~scrapy.Spider.start_urls` class attribute +with a list of URLs. This list will then be used by the default implementation +of :meth:`~scrapy.Spider.start_requests` to create the initial requests +for your spider. + +.. code-block:: python + + from pathlib import Path + + import scrapy + + + class QuotesSpider(scrapy.Spider): + name = "quotes" + start_urls = [ + "https://quotes.toscrape.com/page/1/", + "https://quotes.toscrape.com/page/2/", + ] + + def parse(self, response): + page = response.url.split("/")[-2] + filename = f"quotes-{page}.html" + Path(filename).write_bytes(response.body) + +The :meth:`~scrapy.Spider.parse` method will be called to handle each +of the requests for those URLs, even though we haven't explicitly told Scrapy +to do so. This happens because :meth:`~scrapy.Spider.parse` is Scrapy's +default callback method, which is called for requests without an explicitly +assigned callback. + + +Extracting data +--------------- + +The best way to learn how to extract data with Scrapy is trying selectors +using the :ref:`Scrapy shell `. Run:: + + scrapy shell 'https://quotes.toscrape.com/page/1/' + +.. 
note:: + + Remember to always enclose urls in quotes when running Scrapy shell from + command-line, otherwise urls containing arguments (i.e. ``&`` character) + will not work. + + On Windows, use double quotes instead:: + + scrapy shell "https://quotes.toscrape.com/page/1/" + +You will see something like:: + + [ ... Scrapy log here ... ] + 2016-09-19 12:09:27 [scrapy.core.engine] DEBUG: Crawled (200) (referer: None) + [s] Available Scrapy objects: + [s] scrapy scrapy module (contains scrapy.Request, scrapy.Selector, etc) + [s] crawler + [s] item {} + [s] request + [s] response <200 https://quotes.toscrape.com/page/1/> + [s] settings + [s] spider + [s] Useful shortcuts: + [s] shelp() Shell help (print this help) + [s] fetch(req_or_url) Fetch request (or URL) and update local objects + [s] view(response) View response in a browser + +Using the shell, you can try selecting elements using `CSS`_ with the response +object: + +.. invisible-code-block: python + + response = load_response('https://quotes.toscrape.com/page/1/', 'quotes1.html') + +.. code-block:: pycon + + >>> response.css("title") + [] + +The result of running ``response.css('title')`` is a list-like object called +:class:`~scrapy.selector.SelectorList`, which represents a list of +:class:`~scrapy.Selector` objects that wrap around XML/HTML elements +and allow you to run further queries to fine-grain the selection or extract the +data. + +To extract the text from the title above, you can do: + +.. code-block:: pycon + + >>> response.css("title::text").getall() + ['Quotes to Scrape'] + +There are two things to note here: one is that we've added ``::text`` to the +CSS query, to mean we want to select only the text elements directly inside +```` element. If we don't specify ``::text``, we'd get the full title +element, including its tags: + +.. code-block:: pycon + + >>> response.css("title").getall() + ['<title>Quotes to Scrape'] + +The other thing is that the result of calling ``.getall()`` is a list: it is +possible that a selector returns more than one result, so we extract them all. +When you know you just want the first result, as in this case, you can do: + +.. code-block:: pycon + + >>> response.css("title::text").get() + 'Quotes to Scrape' + +As an alternative, you could've written: + +.. code-block:: pycon + + >>> response.css("title::text")[0].get() + 'Quotes to Scrape' + +Accessing an index on a :class:`~scrapy.selector.SelectorList` instance will +raise an :exc:`IndexError` exception if there are no results: + +.. code-block:: pycon + + >>> response.css("noelement")[0].get() + Traceback (most recent call last): + ... + IndexError: list index out of range + +You might want to use ``.get()`` directly on the +:class:`~scrapy.selector.SelectorList` instance instead, which returns ``None`` +if there are no results: + +.. code-block:: pycon + + >>> response.css("noelement").get() + +There's a lesson here: for most scraping code, you want it to be resilient to +errors due to things not being found on a page, so that even if some parts fail +to be scraped, you can at least get **some** data. + +Besides the :meth:`~scrapy.selector.SelectorList.getall` and +:meth:`~scrapy.selector.SelectorList.get` methods, you can also use +the :meth:`~scrapy.selector.SelectorList.re` method to extract using +:doc:`regular expressions `: + +.. 
code-block:: pycon + + >>> response.css("title::text").re(r"Quotes.*") + ['Quotes to Scrape'] + >>> response.css("title::text").re(r"Q\w+") + ['Quotes'] + >>> response.css("title::text").re(r"(\w+) to (\w+)") + ['Quotes', 'Scrape'] + +In order to find the proper CSS selectors to use, you might find useful opening +the response page from the shell in your web browser using ``view(response)``. +You can use your browser's developer tools to inspect the HTML and come up +with a selector (see :ref:`topics-developer-tools`). + +`Selector Gadget`_ is also a nice tool to quickly find CSS selector for +visually selected elements, which works in many browsers. + +.. _Selector Gadget: https://selectorgadget.com/ + + +XPath: a brief intro +^^^^^^^^^^^^^^^^^^^^ + +Besides `CSS`_, Scrapy selectors also support using `XPath`_ expressions: + +.. code-block:: pycon + + >>> response.xpath("//title") + [] + >>> response.xpath("//title/text()").get() + 'Quotes to Scrape' + +XPath expressions are very powerful, and are the foundation of Scrapy +Selectors. In fact, CSS selectors are converted to XPath under-the-hood. You +can see that if you read closely the text representation of the selector +objects in the shell. + +While perhaps not as popular as CSS selectors, XPath expressions offer more +power because besides navigating the structure, it can also look at the +content. Using XPath, you're able to select things like: *select the link +that contains the text "Next Page"*. This makes XPath very fitting to the task +of scraping, and we encourage you to learn XPath even if you already know how to +construct CSS selectors, it will make scraping much easier. + +We won't cover much of XPath here, but you can read more about :ref:`using XPath +with Scrapy Selectors here `. To learn more about XPath, we +recommend `this tutorial to learn XPath through examples +`_, and `this tutorial to learn "how +to think in XPath" `_. + +.. _XPath: https://www.w3.org/TR/xpath/all/ +.. _CSS: https://www.w3.org/TR/selectors + +Extracting quotes and authors +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Now that you know a bit about selection and extraction, let's complete our +spider by writing the code to extract the quotes from the web page. + +Each quote in https://quotes.toscrape.com is represented by HTML elements that look +like this: + +.. code-block:: html + +
+    <div class="quote">
+        <span class="text">“The world as we have created it is a process of our
+        thinking. It cannot be changed without changing our thinking.”</span>
+        <span>
+            by <small class="author">Albert Einstein</small>
+            <a href="/author/Albert-Einstein">(about)</a>
+        </span>
+        <div class="tags">
+            Tags:
+            <a class="tag" href="/tag/change/page/1/">change</a>
+            <a class="tag" href="/tag/deep-thoughts/page/1/">deep-thoughts</a>
+            <a class="tag" href="/tag/thinking/page/1/">thinking</a>
+            <a class="tag" href="/tag/world/page/1/">world</a>
+        </div>
+    </div>
+ +Let's open up scrapy shell and play a bit to find out how to extract the data +we want:: + + scrapy shell 'https://quotes.toscrape.com' + +We get a list of selectors for the quote HTML elements with: + +.. code-block:: pycon + + >>> response.css("div.quote") + [, + , + ...] + +Each of the selectors returned by the query above allows us to run further +queries over their sub-elements. Let's assign the first selector to a +variable, so that we can run our CSS selectors directly on a particular quote: + +.. code-block:: pycon + + >>> quote = response.css("div.quote")[0] + +Now, let's extract ``text``, ``author`` and the ``tags`` from that quote +using the ``quote`` object we just created: + +.. code-block:: pycon + + >>> text = quote.css("span.text::text").get() + >>> text + '“The world as we have created it is a process of our thinking. It cannot be changed without changing our thinking.”' + >>> author = quote.css("small.author::text").get() + >>> author + 'Albert Einstein' + +Given that the tags are a list of strings, we can use the ``.getall()`` method +to get all of them: + +.. code-block:: pycon + + >>> tags = quote.css("div.tags a.tag::text").getall() + >>> tags + ['change', 'deep-thoughts', 'thinking', 'world'] + +.. invisible-code-block: python + + from sys import version_info + +Having figured out how to extract each bit, we can now iterate over all the +quotes elements and put them together into a Python dictionary: + +.. code-block:: pycon + + >>> for quote in response.css("div.quote"): + ... text = quote.css("span.text::text").get() + ... author = quote.css("small.author::text").get() + ... tags = quote.css("div.tags a.tag::text").getall() + ... print(dict(text=text, author=author, tags=tags)) + ... + {'text': '“The world as we have created it is a process of our thinking. It cannot be changed without changing our thinking.”', 'author': 'Albert Einstein', 'tags': ['change', 'deep-thoughts', 'thinking', 'world']} + {'text': '“It is our choices, Harry, that show what we truly are, far more than our abilities.”', 'author': 'J.K. Rowling', 'tags': ['abilities', 'choices']} + ... + +Extracting data in our spider +----------------------------- + +Let's get back to our spider. Until now, it doesn't extract any data in +particular, just saves the whole HTML page to a local file. Let's integrate the +extraction logic above into our spider. + +A Scrapy spider typically generates many dictionaries containing the data +extracted from the page. To do that, we use the ``yield`` Python keyword +in the callback, as you can see below: + +.. code-block:: python + + import scrapy + + + class QuotesSpider(scrapy.Spider): + name = "quotes" + start_urls = [ + "https://quotes.toscrape.com/page/1/", + "https://quotes.toscrape.com/page/2/", + ] + + def parse(self, response): + for quote in response.css("div.quote"): + yield { + "text": quote.css("span.text::text").get(), + "author": quote.css("small.author::text").get(), + "tags": quote.css("div.tags a.tag::text").getall(), + } + +If you run this spider, it will output the extracted data with the log:: + + 2016-09-19 18:57:19 [scrapy.core.scraper] DEBUG: Scraped from <200 https://quotes.toscrape.com/page/1/> + {'tags': ['life', 'love'], 'author': 'André Gide', 'text': '“It is better to be hated for what you are than to be loved for what you are not.”'} + 2016-09-19 18:57:19 [scrapy.core.scraper] DEBUG: Scraped from <200 https://quotes.toscrape.com/page/1/> + {'tags': ['edison', 'failure', 'inspirational', 'paraphrased'], 'author': 'Thomas A. 
Edison', 'text': "“I have not failed. I've just found 10,000 ways that won't work.”"} + + +.. _storing-data: + +Storing the scraped data +======================== + +The simplest way to store the scraped data is by using :ref:`Feed exports +`, with the following command:: + + scrapy crawl quotes -O quotes.json + +That will generate a ``quotes.json`` file containing all scraped items, +serialized in `JSON`_. + +The ``-O`` command-line switch overwrites any existing file; use ``-o`` instead +to append new content to any existing file. However, appending to a JSON file +makes the file contents invalid JSON. When appending to a file, consider +using a different serialization format, such as `JSON Lines`_:: + + scrapy crawl quotes -o quotes.jsonl + +The `JSON Lines`_ format is useful because it's stream-like, you can easily +append new records to it. It doesn't have the same problem of JSON when you run +twice. Also, as each record is a separate line, you can process big files +without having to fit everything in memory, there are tools like `JQ`_ to help +do that at the command-line. + +In small projects (like the one in this tutorial), that should be enough. +However, if you want to perform more complex things with the scraped items, you +can write an :ref:`Item Pipeline `. A placeholder file +for Item Pipelines has been set up for you when the project is created, in +``tutorial/pipelines.py``. Though you don't need to implement any item +pipelines if you just want to store the scraped items. + +.. _JSON Lines: http://jsonlines.org +.. _JQ: https://stedolan.github.io/jq + + +Following links +=============== + +Let's say, instead of just scraping the stuff from the first two pages +from https://quotes.toscrape.com, you want quotes from all the pages in the website. + +Now that you know how to extract data from pages, let's see how to follow links +from them. + +First thing is to extract the link to the page we want to follow. Examining +our page, we can see there is a link to the next page with the following +markup: + +.. code-block:: html + + + +We can try extracting it in the shell: + +>>> response.css('li.next a').get() +'Next ' + +This gets the anchor element, but we want the attribute ``href``. For that, +Scrapy supports a CSS extension that lets you select the attribute contents, +like this: + +.. code-block:: pycon + + >>> response.css("li.next a::attr(href)").get() + '/page/2/' + +There is also an ``attrib`` property available +(see :ref:`selecting-attributes` for more): + +.. code-block:: pycon + + >>> response.css("li.next a").attrib["href"] + '/page/2/' + +Let's see now our spider modified to recursively follow the link to the next +page, extracting data from it: + +.. 
code-block:: python + + import scrapy + + + class QuotesSpider(scrapy.Spider): + name = "quotes" + start_urls = [ + "https://quotes.toscrape.com/page/1/", + ] + + def parse(self, response): + for quote in response.css("div.quote"): + yield { + "text": quote.css("span.text::text").get(), + "author": quote.css("small.author::text").get(), + "tags": quote.css("div.tags a.tag::text").getall(), + } + + next_page = response.css("li.next a::attr(href)").get() + if next_page is not None: + next_page = response.urljoin(next_page) + yield scrapy.Request(next_page, callback=self.parse) + + +Now, after extracting the data, the ``parse()`` method looks for the link to +the next page, builds a full absolute URL using the +:meth:`~scrapy.http.Response.urljoin` method (since the links can be +relative) and yields a new request to the next page, registering itself as +callback to handle the data extraction for the next page and to keep the +crawling going through all the pages. + +What you see here is Scrapy's mechanism of following links: when you yield +a Request in a callback method, Scrapy will schedule that request to be sent +and register a callback method to be executed when that request finishes. + +Using this, you can build complex crawlers that follow links according to rules +you define, and extract different kinds of data depending on the page it's +visiting. + +In our example, it creates a sort of loop, following all the links to the next page +until it doesn't find one -- handy for crawling blogs, forums and other sites with +pagination. + + +.. _response-follow-example: + +A shortcut for creating Requests +-------------------------------- + +As a shortcut for creating Request objects you can use +:meth:`response.follow `: + +.. code-block:: python + + import scrapy + + + class QuotesSpider(scrapy.Spider): + name = "quotes" + start_urls = [ + "https://quotes.toscrape.com/page/1/", + ] + + def parse(self, response): + for quote in response.css("div.quote"): + yield { + "text": quote.css("span.text::text").get(), + "author": quote.css("span small::text").get(), + "tags": quote.css("div.tags a.tag::text").getall(), + } + + next_page = response.css("li.next a::attr(href)").get() + if next_page is not None: + yield response.follow(next_page, callback=self.parse) + +Unlike scrapy.Request, ``response.follow`` supports relative URLs directly - no +need to call urljoin. Note that ``response.follow`` just returns a Request +instance; you still have to yield this Request. + +.. skip: start + +You can also pass a selector to ``response.follow`` instead of a string; +this selector should extract necessary attributes: + +.. code-block:: python + + for href in response.css("ul.pager a::attr(href)"): + yield response.follow(href, callback=self.parse) + +For ```` elements there is a shortcut: ``response.follow`` uses their href +attribute automatically. So the code can be shortened further: + +.. code-block:: python + + for a in response.css("ul.pager a"): + yield response.follow(a, callback=self.parse) + +To create multiple requests from an iterable, you can use +:meth:`response.follow_all ` instead: + +.. code-block:: python + + anchors = response.css("ul.pager a") + yield from response.follow_all(anchors, callback=self.parse) + +or, shortening it further: + +.. code-block:: python + + yield from response.follow_all(css="ul.pager a", callback=self.parse) + +.. 
skip: end + + +More examples and patterns +-------------------------- + +Here is another spider that illustrates callbacks and following links, +this time for scraping author information: + +.. code-block:: python + + import scrapy + + + class AuthorSpider(scrapy.Spider): + name = "author" + + start_urls = ["https://quotes.toscrape.com/"] + + def parse(self, response): + author_page_links = response.css(".author + a") + yield from response.follow_all(author_page_links, self.parse_author) + + pagination_links = response.css("li.next a") + yield from response.follow_all(pagination_links, self.parse) + + def parse_author(self, response): + def extract_with_css(query): + return response.css(query).get(default="").strip() + + yield { + "name": extract_with_css("h3.author-title::text"), + "birthdate": extract_with_css(".author-born-date::text"), + "bio": extract_with_css(".author-description::text"), + } + +This spider will start from the main page, it will follow all the links to the +authors pages calling the ``parse_author`` callback for each of them, and also +the pagination links with the ``parse`` callback as we saw before. + +Here we're passing callbacks to +:meth:`response.follow_all ` as positional +arguments to make the code shorter; it also works for +:class:`~scrapy.Request`. + +The ``parse_author`` callback defines a helper function to extract and cleanup the +data from a CSS query and yields the Python dict with the author data. + +Another interesting thing this spider demonstrates is that, even if there are +many quotes from the same author, we don't need to worry about visiting the +same author page multiple times. By default, Scrapy filters out duplicated +requests to URLs already visited, avoiding the problem of hitting servers too +much because of a programming mistake. This can be configured by the setting +:setting:`DUPEFILTER_CLASS`. + +Hopefully by now you have a good understanding of how to use the mechanism +of following links and callbacks with Scrapy. + +As yet another example spider that leverages the mechanism of following links, +check out the :class:`~scrapy.spiders.CrawlSpider` class for a generic +spider that implements a small rules engine that you can use to write your +crawlers on top of it. + +Also, a common pattern is to build an item with data from more than one page, +using a :ref:`trick to pass additional data to the callbacks +`. + + +Using spider arguments +====================== + +You can provide command line arguments to your spiders by using the ``-a`` +option when running them:: + + scrapy crawl quotes -O quotes-humor.json -a tag=humor + +These arguments are passed to the Spider's ``__init__`` method and become +spider attributes by default. + +In this example, the value provided for the ``tag`` argument will be available +via ``self.tag``. You can use this to make your spider fetch only quotes +with a specific tag, building the URL based on the argument: + +.. 
code-block:: python + + import scrapy + + + class QuotesSpider(scrapy.Spider): + name = "quotes" + + def start_requests(self): + url = "https://quotes.toscrape.com/" + tag = getattr(self, "tag", None) + if tag is not None: + url = url + "tag/" + tag + yield scrapy.Request(url, self.parse) + + def parse(self, response): + for quote in response.css("div.quote"): + yield { + "text": quote.css("span.text::text").get(), + "author": quote.css("small.author::text").get(), + } + + next_page = response.css("li.next a::attr(href)").get() + if next_page is not None: + yield response.follow(next_page, self.parse) + + +If you pass the ``tag=humor`` argument to this spider, you'll notice that it +will only visit URLs from the ``humor`` tag, such as +``https://quotes.toscrape.com/tag/humor``. + +You can :ref:`learn more about handling spider arguments here `. + +Next steps +========== + +This tutorial covered only the basics of Scrapy, but there's a lot of other +features not mentioned here. Check the :ref:`topics-whatelse` section in +:ref:`intro-overview` chapter for a quick overview of the most important ones. + +You can continue from the section :ref:`section-basics` to know more about the +command-line tool, spiders, selectors and other things the tutorial hasn't covered like +modeling the scraped data. If you prefer to play with an example project, check +the :ref:`intro-examples` section. + +.. _JSON: https://en.wikipedia.org/wiki/JSON diff --git a/scrapy-master/docs/news.rst b/scrapy-master/docs/news.rst new file mode 100644 index 0000000..9b9eeac --- /dev/null +++ b/scrapy-master/docs/news.rst @@ -0,0 +1,5662 @@ +.. _news: + +Release notes +============= + +.. _release-2.8.0: + +Scrapy 2.8.0 (2023-02-02) +------------------------- + +This is a maintenance release, with minor features, bug fixes, and cleanups. + +Deprecation removals +~~~~~~~~~~~~~~~~~~~~ + +- The ``scrapy.utils.gz.read1`` function, deprecated in Scrapy 2.0, has now + been removed. Use the :meth:`~io.BufferedIOBase.read1` method of + :class:`~gzip.GzipFile` instead. + (:issue:`5719`) + +- The ``scrapy.utils.python.to_native_str`` function, deprecated in Scrapy + 2.0, has now been removed. Use :func:`scrapy.utils.python.to_unicode` + instead. + (:issue:`5719`) + +- The ``scrapy.utils.python.MutableChain.next`` method, deprecated in Scrapy + 2.0, has now been removed. Use + :meth:`~scrapy.utils.python.MutableChain.__next__` instead. + (:issue:`5719`) + +- The ``scrapy.linkextractors.FilteringLinkExtractor`` class, deprecated + in Scrapy 2.0, has now been removed. Use + :class:`LinkExtractor ` + instead. + (:issue:`5720`) + +- Support for using environment variables prefixed with ``SCRAPY_`` to + override settings, deprecated in Scrapy 2.0, has now been removed. + (:issue:`5724`) + +- Support for the ``noconnect`` query string argument in proxy URLs, + deprecated in Scrapy 2.0, has now been removed. We expect proxies that used + to need it to work fine without it. + (:issue:`5731`) + +- The ``scrapy.utils.python.retry_on_eintr`` function, deprecated in Scrapy + 2.3, has now been removed. + (:issue:`5719`) + +- The ``scrapy.utils.python.WeakKeyCache`` class, deprecated in Scrapy 2.4, + has now been removed. + (:issue:`5719`) + + +Deprecations +~~~~~~~~~~~~ + +- :exc:`scrapy.pipelines.images.NoimagesDrop` is now deprecated. + (:issue:`5368`, :issue:`5489`) + +- :meth:`ImagesPipeline.convert_image + ` must now accept a + ``response_body`` parameter. 
+ (:issue:`3055`, :issue:`3689`, :issue:`4753`) + + +New features +~~~~~~~~~~~~ + +- Applied black_ coding style to files generated with the + :command:`genspider` and :command:`startproject` commands. + (:issue:`5809`, :issue:`5814`) + + .. _black: https://black.readthedocs.io/en/stable/ + +- :setting:`FEED_EXPORT_ENCODING` is now set to ``"utf-8"`` in the + ``settings.py`` file that the :command:`startproject` command generates. + With this value, JSON exports won’t force the use of escape sequences for + non-ASCII characters. + (:issue:`5797`, :issue:`5800`) + +- The :class:`~scrapy.extensions.memusage.MemoryUsage` extension now logs the + peak memory usage during checks, and the binary unit MiB is now used to + avoid confusion. + (:issue:`5717`, :issue:`5722`, :issue:`5727`) + +- The ``callback`` parameter of :class:`~scrapy.http.Request` can now be set + to :func:`scrapy.http.request.NO_CALLBACK`, to distinguish it from + ``None``, as the latter indicates that the default spider callback + (:meth:`~scrapy.Spider.parse`) is to be used. + (:issue:`5798`) + + +Bug fixes +~~~~~~~~~ + +- Enabled unsafe legacy SSL renegotiation to fix access to some outdated + websites. + (:issue:`5491`, :issue:`5790`) + +- Fixed STARTTLS-based email delivery not working with Twisted 21.2.0 and + better. + (:issue:`5386`, :issue:`5406`) + +- Fixed the :meth:`finish_exporting` method of :ref:`item exporters + ` not being called for empty files. + (:issue:`5537`, :issue:`5758`) + +- Fixed HTTP/2 responses getting only the last value for a header when + multiple headers with the same name are received. + (:issue:`5777`) + +- Fixed an exception raised by the :command:`shell` command on some cases + when :ref:`using asyncio `. + (:issue:`5740`, :issue:`5742`, :issue:`5748`, :issue:`5759`, :issue:`5760`, + :issue:`5771`) + +- When using :class:`~scrapy.spiders.CrawlSpider`, callback keyword arguments + (``cb_kwargs``) added to a request in the ``process_request`` callback of a + :class:`~scrapy.spiders.Rule` will no longer be ignored. + (:issue:`5699`) + +- The :ref:`images pipeline ` no longer re-encodes JPEG + files. + (:issue:`3055`, :issue:`3689`, :issue:`4753`) + +- Fixed the handling of transparent WebP images by the :ref:`images pipeline + `. + (:issue:`3072`, :issue:`5766`, :issue:`5767`) + +- :func:`scrapy.shell.inspect_response` no longer inhibits ``SIGINT`` + (Ctrl+C). + (:issue:`2918`) + +- :class:`LinkExtractor ` + with ``unique=False`` no longer filters out links that have identical URL + *and* text. + (:issue:`3798`, :issue:`3799`, :issue:`4695`, :issue:`5458`) + +- :class:`~scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware` now + ignores URL protocols that do not support ``robots.txt`` (``data://``, + ``file://``). + (:issue:`5807`) + +- Silenced the ``filelock`` debug log messages introduced in Scrapy 2.6. + (:issue:`5753`, :issue:`5754`) + +- Fixed the output of ``scrapy -h`` showing an unintended ``**commands**`` + line. + (:issue:`5709`, :issue:`5711`, :issue:`5712`) + +- Made the active project indication in the output of :ref:`commands + ` more clear. + (:issue:`5715`) + + +Documentation +~~~~~~~~~~~~~ + +- Documented how to :ref:`debug spiders from Visual Studio Code + `. + (:issue:`5721`) + +- Documented how :setting:`DOWNLOAD_DELAY` affects per-domain concurrency. + (:issue:`5083`, :issue:`5540`) + +- Improved consistency. + (:issue:`5761`) + +- Fixed typos. 
+ (:issue:`5714`, :issue:`5744`, :issue:`5764`) + + +Quality assurance +~~~~~~~~~~~~~~~~~ + +- Applied :ref:`black coding style `, sorted import statements, + and introduced :ref:`pre-commit `. + (:issue:`4654`, :issue:`4658`, :issue:`5734`, :issue:`5737`, :issue:`5806`, + :issue:`5810`) + +- Switched from :mod:`os.path` to :mod:`pathlib`. + (:issue:`4916`, :issue:`4497`, :issue:`5682`) + +- Addressed many issues reported by Pylint. + (:issue:`5677`) + +- Improved code readability. + (:issue:`5736`) + +- Improved package metadata. + (:issue:`5768`) + +- Removed direct invocations of ``setup.py``. + (:issue:`5774`, :issue:`5776`) + +- Removed unnecessary :class:`~collections.OrderedDict` usages. + (:issue:`5795`) + +- Removed unnecessary ``__str__`` definitions. + (:issue:`5150`) + +- Removed obsolete code and comments. + (:issue:`5725`, :issue:`5729`, :issue:`5730`, :issue:`5732`) + +- Fixed test and CI issues. + (:issue:`5749`, :issue:`5750`, :issue:`5756`, :issue:`5762`, :issue:`5765`, + :issue:`5780`, :issue:`5781`, :issue:`5782`, :issue:`5783`, :issue:`5785`, + :issue:`5786`) + + +.. _release-2.7.1: + +Scrapy 2.7.1 (2022-11-02) +------------------------- + +New features +~~~~~~~~~~~~ + +- Relaxed the restriction introduced in 2.6.2 so that the + ``Proxy-Authorization`` header can again be set explicitly, as long as the + proxy URL in the :reqmeta:`proxy` metadata has no other credentials, and + for as long as that proxy URL remains the same; this restores compatibility + with scrapy-zyte-smartproxy 2.1.0 and older (:issue:`5626`). + +Bug fixes +~~~~~~~~~ + +- Using ``-O``/``--overwrite-output`` and ``-t``/``--output-format`` options + together now produces an error instead of ignoring the former option + (:issue:`5516`, :issue:`5605`). + +- Replaced deprecated :mod:`asyncio` APIs that implicitly use the current + event loop with code that explicitly requests a loop from the event loop + policy (:issue:`5685`, :issue:`5689`). + +- Fixed uses of deprecated Scrapy APIs in Scrapy itself (:issue:`5588`, + :issue:`5589`). + +- Fixed uses of a deprecated Pillow API (:issue:`5684`, :issue:`5692`). + +- Improved code that checks if generators return values, so that it no longer + fails on decorated methods and partial methods (:issue:`5323`, + :issue:`5592`, :issue:`5599`, :issue:`5691`). + +Documentation +~~~~~~~~~~~~~ + +- Upgraded the Code of Conduct to Contributor Covenant v2.1 (:issue:`5698`). + +- Fixed typos (:issue:`5681`, :issue:`5694`). + +Quality assurance +~~~~~~~~~~~~~~~~~ + +- Re-enabled some erroneously disabled flake8 checks (:issue:`5688`). + +- Ignored harmless deprecation warnings from :mod:`typing` in tests + (:issue:`5686`, :issue:`5697`). + +- Modernized our CI configuration (:issue:`5695`, :issue:`5696`). + + +.. _release-2.7.0: + +Scrapy 2.7.0 (2022-10-17) +----------------------------- + +Highlights: + +- Added Python 3.11 support, dropped Python 3.6 support +- Improved support for :ref:`asynchronous callbacks ` +- :ref:`Asyncio support ` is enabled by default on new + projects +- Output names of item fields can now be arbitrary strings +- Centralized :ref:`request fingerprinting ` + configuration is now possible + +Modified requirements +~~~~~~~~~~~~~~~~~~~~~ + +Python 3.7 or greater is now required; support for Python 3.6 has been dropped. +Support for the upcoming Python 3.11 has been added. 
+ +The minimum required version of some dependencies has changed as well: + +- lxml_: 3.5.0 → 4.3.0 + +- Pillow_ (:ref:`images pipeline `): 4.0.0 → 7.1.0 + +- zope.interface_: 5.0.0 → 5.1.0 + +(:issue:`5512`, :issue:`5514`, :issue:`5524`, :issue:`5563`, :issue:`5664`, +:issue:`5670`, :issue:`5678`) + + +Deprecations +~~~~~~~~~~~~ + +- :meth:`ImagesPipeline.thumb_path + ` must now accept an + ``item`` parameter (:issue:`5504`, :issue:`5508`). + +- The ``scrapy.downloadermiddlewares.decompression`` module is now + deprecated (:issue:`5546`, :issue:`5547`). + + +New features +~~~~~~~~~~~~ + +- The + :meth:`~scrapy.spidermiddlewares.SpiderMiddleware.process_spider_output` + method of :ref:`spider middlewares ` can now be + defined as an :term:`asynchronous generator` (:issue:`4978`). + +- The output of :class:`~scrapy.Request` callbacks defined as + :ref:`coroutines ` is now processed asynchronously + (:issue:`4978`). + +- :class:`~scrapy.spiders.crawl.CrawlSpider` now supports :ref:`asynchronous + callbacks ` (:issue:`5657`). + +- New projects created with the :command:`startproject` command have + :ref:`asyncio support ` enabled by default (:issue:`5590`, + :issue:`5679`). + +- The :setting:`FEED_EXPORT_FIELDS` setting can now be defined as a + dictionary to customize the output name of item fields, lifting the + restriction that required output names to be valid Python identifiers, e.g. + preventing them to have whitespace (:issue:`1008`, :issue:`3266`, + :issue:`3696`). + +- You can now customize :ref:`request fingerprinting ` + through the new :setting:`REQUEST_FINGERPRINTER_CLASS` setting, instead of + having to change it on every Scrapy component that relies on request + fingerprinting (:issue:`900`, :issue:`3420`, :issue:`4113`, :issue:`4762`, + :issue:`4524`). + +- ``jsonl`` is now supported and encouraged as a file extension for `JSON + Lines`_ files (:issue:`4848`). + + .. _JSON Lines: https://jsonlines.org/ + +- :meth:`ImagesPipeline.thumb_path + ` now receives the + source :ref:`item ` (:issue:`5504`, :issue:`5508`). + + +Bug fixes +~~~~~~~~~ + +- When using Google Cloud Storage with a :ref:`media pipeline + `, :setting:`FILES_EXPIRES` now also works when + :setting:`FILES_STORE` does not point at the root of your Google Cloud + Storage bucket (:issue:`5317`, :issue:`5318`). + +- The :command:`parse` command now supports :ref:`asynchronous callbacks + ` (:issue:`5424`, :issue:`5577`). + +- When using the :command:`parse` command with a URL for which there is no + available spider, an exception is no longer raised (:issue:`3264`, + :issue:`3265`, :issue:`5375`, :issue:`5376`, :issue:`5497`). + +- :class:`~scrapy.http.TextResponse` now gives higher priority to the `byte + order mark`_ when determining the text encoding of the response body, + following the `HTML living standard`_ (:issue:`5601`, :issue:`5611`). + + .. _byte order mark: https://en.wikipedia.org/wiki/Byte_order_mark + .. _HTML living standard: https://html.spec.whatwg.org/multipage/parsing.html#determining-the-character-encoding + +- MIME sniffing takes the response body into account in FTP and HTTP/1.0 + requests, as well as in cached requests (:issue:`4873`). + +- MIME sniffing now detects valid HTML 5 documents even if the ``html`` tag + is missing (:issue:`4873`). + +- An exception is now raised if :setting:`ASYNCIO_EVENT_LOOP` has a value + that does not match the asyncio event loop actually installed + (:issue:`5529`). 
+ +- Fixed :meth:`Headers.getlist ` + returning only the last header (:issue:`5515`, :issue:`5526`). + +- Fixed :class:`LinkExtractor + ` not ignoring the + ``tar.gz`` file extension by default (:issue:`1837`, :issue:`2067`, + :issue:`4066`) + + +Documentation +~~~~~~~~~~~~~ + +- Clarified the return type of :meth:`Spider.parse ` + (:issue:`5602`, :issue:`5608`). + +- To enable + :class:`~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware` + to do `brotli compression`_, installing brotli_ is now recommended instead + of installing brotlipy_, as the former provides a more recent version of + brotli. + + .. _brotli: https://github.com/google/brotli + .. _brotli compression: https://www.ietf.org/rfc/rfc7932.txt + +- :ref:`Signal documentation ` now mentions :ref:`coroutine + support ` and uses it in code examples (:issue:`4852`, + :issue:`5358`). + +- :ref:`bans` now recommends `Common Crawl`_ instead of `Google cache`_ + (:issue:`3582`, :issue:`5432`). + + .. _Common Crawl: https://commoncrawl.org/ + .. _Google cache: http://www.googleguide.com/cached_pages.html + +- The new :ref:`topics-components` topic covers enforcing requirements on + Scrapy components, like :ref:`downloader middlewares + `, :ref:`extensions `, + :ref:`item pipelines `, :ref:`spider middlewares + `, and more; :ref:`enforce-asyncio-requirement` + has also been added (:issue:`4978`). + +- :ref:`topics-settings` now indicates that setting values must be + :ref:`picklable ` (:issue:`5607`, :issue:`5629`). + +- Removed outdated documentation (:issue:`5446`, :issue:`5373`, + :issue:`5369`, :issue:`5370`, :issue:`5554`). + +- Fixed typos (:issue:`5442`, :issue:`5455`, :issue:`5457`, :issue:`5461`, + :issue:`5538`, :issue:`5553`, :issue:`5558`, :issue:`5624`, :issue:`5631`). + +- Fixed other issues (:issue:`5283`, :issue:`5284`, :issue:`5559`, + :issue:`5567`, :issue:`5648`, :issue:`5659`, :issue:`5665`). + + +Quality assurance +~~~~~~~~~~~~~~~~~ + +- Added a continuous integration job to run `twine check`_ (:issue:`5655`, + :issue:`5656`). + + .. _twine check: https://twine.readthedocs.io/en/stable/#twine-check + +- Addressed test issues and warnings (:issue:`5560`, :issue:`5561`, + :issue:`5612`, :issue:`5617`, :issue:`5639`, :issue:`5645`, :issue:`5662`, + :issue:`5671`, :issue:`5675`). + +- Cleaned up code (:issue:`4991`, :issue:`4995`, :issue:`5451`, + :issue:`5487`, :issue:`5542`, :issue:`5667`, :issue:`5668`, :issue:`5672`). + +- Applied minor code improvements (:issue:`5661`). + + +.. _release-2.6.3: + +Scrapy 2.6.3 (2022-09-27) +------------------------- + +- Added support for pyOpenSSL_ 22.1.0, removing support for SSLv3 + (:issue:`5634`, :issue:`5635`, :issue:`5636`). + +- Upgraded the minimum versions of the following dependencies: + + - cryptography_: 2.0 → 3.3 + + - pyOpenSSL_: 16.2.0 → 21.0.0 + + - service_identity_: 16.0.0 → 18.1.0 + + - Twisted_: 17.9.0 → 18.9.0 + + - zope.interface_: 4.1.3 → 5.0.0 + + (:issue:`5621`, :issue:`5632`) + +- Fixes test and documentation issues (:issue:`5612`, :issue:`5617`, + :issue:`5631`). + + +.. _release-2.6.2: + +Scrapy 2.6.2 (2022-07-25) +------------------------- + +**Security bug fix:** + +- When :class:`~scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware` + processes a request with :reqmeta:`proxy` metadata, and that + :reqmeta:`proxy` metadata includes proxy credentials, + :class:`~scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware` sets + the ``Proxy-Authorization`` header, but only if that header is not already + set. 
+ + There are third-party proxy-rotation downloader middlewares that set + different :reqmeta:`proxy` metadata every time they process a request. + + Because of request retries and redirects, the same request can be processed + by downloader middlewares more than once, including both + :class:`~scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware` and + any third-party proxy-rotation downloader middleware. + + These third-party proxy-rotation downloader middlewares could change the + :reqmeta:`proxy` metadata of a request to a new value, but fail to remove + the ``Proxy-Authorization`` header from the previous value of the + :reqmeta:`proxy` metadata, causing the credentials of one proxy to be sent + to a different proxy. + + To prevent the unintended leaking of proxy credentials, the behavior of + :class:`~scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware` is now + as follows when processing a request: + + - If the request being processed defines :reqmeta:`proxy` metadata that + includes credentials, the ``Proxy-Authorization`` header is always + updated to feature those credentials. + + - If the request being processed defines :reqmeta:`proxy` metadata + without credentials, the ``Proxy-Authorization`` header is removed + *unless* it was originally defined for the same proxy URL. + + To remove proxy credentials while keeping the same proxy URL, remove + the ``Proxy-Authorization`` header. + + - If the request has no :reqmeta:`proxy` metadata, or that metadata is a + falsy value (e.g. ``None``), the ``Proxy-Authorization`` header is + removed. + + It is no longer possible to set a proxy URL through the + :reqmeta:`proxy` metadata but set the credentials through the + ``Proxy-Authorization`` header. Set proxy credentials through the + :reqmeta:`proxy` metadata instead. + +Also fixes the following regressions introduced in 2.6.0: + +- :class:`~scrapy.crawler.CrawlerProcess` supports again crawling multiple + spiders (:issue:`5435`, :issue:`5436`) + +- Installing a Twisted reactor before Scrapy does (e.g. importing + :mod:`twisted.internet.reactor` somewhere at the module level) no longer + prevents Scrapy from starting, as long as a different reactor is not + specified in :setting:`TWISTED_REACTOR` (:issue:`5525`, :issue:`5528`) + +- Fixed an exception that was being logged after the spider finished under + certain conditions (:issue:`5437`, :issue:`5440`) + +- The ``--output``/``-o`` command-line parameter supports again a value + starting with a hyphen (:issue:`5444`, :issue:`5445`) + +- The ``scrapy parse -h`` command no longer throws an error (:issue:`5481`, + :issue:`5482`) + + +.. _release-2.6.1: + +Scrapy 2.6.1 (2022-03-01) +------------------------- + +Fixes a regression introduced in 2.6.0 that would unset the request method when +following redirects. + + +.. _release-2.6.0: + +Scrapy 2.6.0 (2022-03-01) +------------------------- + +Highlights: + +* :ref:`Security fixes for cookie handling <2.6-security-fixes>` + +* Python 3.10 support + +* :ref:`asyncio support ` is no longer considered + experimental, and works out-of-the-box on Windows regardless of your Python + version + +* Feed exports now support :class:`pathlib.Path` output paths and per-feed + :ref:`item filtering ` and + :ref:`post-processing ` + +.. 
_2.6-security-fixes: + +Security bug fixes +~~~~~~~~~~~~~~~~~~ + +- When a :class:`~scrapy.http.Request` object with cookies defined gets a + redirect response causing a new :class:`~scrapy.http.Request` object to be + scheduled, the cookies defined in the original + :class:`~scrapy.http.Request` object are no longer copied into the new + :class:`~scrapy.http.Request` object. + + If you manually set the ``Cookie`` header on a + :class:`~scrapy.http.Request` object and the domain name of the redirect + URL is not an exact match for the domain of the URL of the original + :class:`~scrapy.http.Request` object, your ``Cookie`` header is now dropped + from the new :class:`~scrapy.http.Request` object. + + The old behavior could be exploited by an attacker to gain access to your + cookies. Please, see the `cjvr-mfj7-j4j8 security advisory`_ for more + information. + + .. _cjvr-mfj7-j4j8 security advisory: https://github.com/scrapy/scrapy/security/advisories/GHSA-cjvr-mfj7-j4j8 + + .. note:: It is still possible to enable the sharing of cookies between + different domains with a shared domain suffix (e.g. + ``example.com`` and any subdomain) by defining the shared domain + suffix (e.g. ``example.com``) as the cookie domain when defining + your cookies. See the documentation of the + :class:`~scrapy.http.Request` class for more information. + +- When the domain of a cookie, either received in the ``Set-Cookie`` header + of a response or defined in a :class:`~scrapy.http.Request` object, is set + to a `public suffix `_, the cookie is now + ignored unless the cookie domain is the same as the request domain. + + The old behavior could be exploited by an attacker to inject cookies from a + controlled domain into your cookiejar that could be sent to other domains + not controlled by the attacker. Please, see the `mfjm-vh54-3f96 security + advisory`_ for more information. + + .. _mfjm-vh54-3f96 security advisory: https://github.com/scrapy/scrapy/security/advisories/GHSA-mfjm-vh54-3f96 + + +Modified requirements +~~~~~~~~~~~~~~~~~~~~~ + +- The h2_ dependency is now optional, only needed to + :ref:`enable HTTP/2 support `. (:issue:`5113`) + + .. _h2: https://pypi.org/project/h2/ + + +Backward-incompatible changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- The ``formdata`` parameter of :class:`~scrapy.FormRequest`, if specified + for a non-POST request, now overrides the URL query string, instead of + being appended to it. (:issue:`2919`, :issue:`3579`) + +- When a function is assigned to the :setting:`FEED_URI_PARAMS` setting, now + the return value of that function, and not the ``params`` input parameter, + will determine the feed URI parameters, unless that return value is + ``None``. (:issue:`4962`, :issue:`4966`) + +- In :class:`scrapy.core.engine.ExecutionEngine`, methods + :meth:`~scrapy.core.engine.ExecutionEngine.crawl`, + :meth:`~scrapy.core.engine.ExecutionEngine.download`, + :meth:`~scrapy.core.engine.ExecutionEngine.schedule`, + and :meth:`~scrapy.core.engine.ExecutionEngine.spider_is_idle` + now raise :exc:`RuntimeError` if called before + :meth:`~scrapy.core.engine.ExecutionEngine.open_spider`. (:issue:`5090`) + + These methods used to assume that + :attr:`ExecutionEngine.slot ` had + been defined by a prior call to + :meth:`~scrapy.core.engine.ExecutionEngine.open_spider`, so they were + raising :exc:`AttributeError` instead. + +- If the API of the configured :ref:`scheduler ` does not + meet expectations, :exc:`TypeError` is now raised at startup time. 
Before, + other exceptions would be raised at run time. (:issue:`3559`) + +- The ``_encoding`` field of serialized :class:`~scrapy.http.Request` objects + is now named ``encoding``, in line with all other fields (:issue:`5130`) + + +Deprecation removals +~~~~~~~~~~~~~~~~~~~~ + +- ``scrapy.http.TextResponse.body_as_unicode``, deprecated in Scrapy 2.2, has + now been removed. (:issue:`5393`) + +- ``scrapy.item.BaseItem``, deprecated in Scrapy 2.2, has now been removed. + (:issue:`5398`) + +- ``scrapy.item.DictItem``, deprecated in Scrapy 1.8, has now been removed. + (:issue:`5398`) + +- ``scrapy.Spider.make_requests_from_url``, deprecated in Scrapy 1.4, has now + been removed. (:issue:`4178`, :issue:`4356`) + + +Deprecations +~~~~~~~~~~~~ + +- When a function is assigned to the :setting:`FEED_URI_PARAMS` setting, + returning ``None`` or modifying the ``params`` input parameter is now + deprecated. Return a new dictionary instead. (:issue:`4962`, :issue:`4966`) + +- :mod:`scrapy.utils.reqser` is deprecated. (:issue:`5130`) + + - Instead of :func:`~scrapy.utils.reqser.request_to_dict`, use the new + :meth:`Request.to_dict ` method. + + - Instead of :func:`~scrapy.utils.reqser.request_from_dict`, use the new + :func:`scrapy.utils.request.request_from_dict` function. + +- In :mod:`scrapy.squeues`, the following queue classes are deprecated: + :class:`~scrapy.squeues.PickleFifoDiskQueueNonRequest`, + :class:`~scrapy.squeues.PickleLifoDiskQueueNonRequest`, + :class:`~scrapy.squeues.MarshalFifoDiskQueueNonRequest`, + and :class:`~scrapy.squeues.MarshalLifoDiskQueueNonRequest`. You should + instead use: + :class:`~scrapy.squeues.PickleFifoDiskQueue`, + :class:`~scrapy.squeues.PickleLifoDiskQueue`, + :class:`~scrapy.squeues.MarshalFifoDiskQueue`, + and :class:`~scrapy.squeues.MarshalLifoDiskQueue`. (:issue:`5117`) + +- Many aspects of :class:`scrapy.core.engine.ExecutionEngine` that come from + a time when this class could handle multiple :class:`~scrapy.Spider` + objects at a time have been deprecated. (:issue:`5090`) + + - The :meth:`~scrapy.core.engine.ExecutionEngine.has_capacity` method + is deprecated. + + - The :meth:`~scrapy.core.engine.ExecutionEngine.schedule` method is + deprecated, use :meth:`~scrapy.core.engine.ExecutionEngine.crawl` or + :meth:`~scrapy.core.engine.ExecutionEngine.download` instead. + + - The :attr:`~scrapy.core.engine.ExecutionEngine.open_spiders` attribute + is deprecated, use :attr:`~scrapy.core.engine.ExecutionEngine.spider` + instead. + + - The ``spider`` parameter is deprecated for the following methods: + + - :meth:`~scrapy.core.engine.ExecutionEngine.spider_is_idle` + + - :meth:`~scrapy.core.engine.ExecutionEngine.crawl` + + - :meth:`~scrapy.core.engine.ExecutionEngine.download` + + Instead, call :meth:`~scrapy.core.engine.ExecutionEngine.open_spider` + first to set the :class:`~scrapy.Spider` object. + + +New features +~~~~~~~~~~~~ + +- You can now use :ref:`item filtering ` to control which items + are exported to each output feed. (:issue:`4575`, :issue:`5178`, + :issue:`5161`, :issue:`5203`) + +- You can now apply :ref:`post-processing ` to feeds, and + :ref:`built-in post-processing plugins ` are provided for + output file compression. (:issue:`2174`, :issue:`5168`, :issue:`5190`) + +- The :setting:`FEEDS` setting now supports :class:`pathlib.Path` objects as + keys. (:issue:`5383`, :issue:`5384`) + +- Enabling :ref:`asyncio ` while using Windows and Python 3.8 + or later will automatically switch the asyncio event loop to one that + allows Scrapy to work. 
See :ref:`asyncio-windows`. (:issue:`4976`, + :issue:`5315`) + +- The :command:`genspider` command now supports a start URL instead of a + domain name. (:issue:`4439`) + +- :mod:`scrapy.utils.defer` gained 2 new functions, + :func:`~scrapy.utils.defer.deferred_to_future` and + :func:`~scrapy.utils.defer.maybe_deferred_to_future`, to help :ref:`await + on Deferreds when using the asyncio reactor `. + (:issue:`5288`) + +- :ref:`Amazon S3 feed export storage ` gained + support for `temporary security credentials`_ + (:setting:`AWS_SESSION_TOKEN`) and endpoint customization + (:setting:`AWS_ENDPOINT_URL`). (:issue:`4998`, :issue:`5210`) + + .. _temporary security credentials: https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#temporary-access-keys + +- New :setting:`LOG_FILE_APPEND` setting to allow truncating the log file. + (:issue:`5279`) + +- :attr:`Request.cookies ` values that are + :class:`bool`, :class:`float` or :class:`int` are cast to :class:`str`. + (:issue:`5252`, :issue:`5253`) + +- You may now raise :exc:`~scrapy.exceptions.CloseSpider` from a handler of + the :signal:`spider_idle` signal to customize the reason why the spider is + stopping. (:issue:`5191`) + +- When using + :class:`~scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware`, the + proxy URL for non-HTTPS HTTP/1.1 requests no longer needs to include a URL + scheme. (:issue:`4505`, :issue:`4649`) + +- All built-in queues now expose a ``peek`` method that returns the next + queue object (like ``pop``) but does not remove the returned object from + the queue. (:issue:`5112`) + + If the underlying queue does not support peeking (e.g. because you are not + using ``queuelib`` 1.6.1 or later), the ``peek`` method raises + :exc:`NotImplementedError`. + +- :class:`~scrapy.http.Request` and :class:`~scrapy.http.Response` now have + an ``attributes`` attribute that makes subclassing easier. For + :class:`~scrapy.http.Request`, it also allows subclasses to work with + :func:`scrapy.utils.request.request_from_dict`. (:issue:`1877`, + :issue:`5130`, :issue:`5218`) + +- The :meth:`~scrapy.core.scheduler.BaseScheduler.open` and + :meth:`~scrapy.core.scheduler.BaseScheduler.close` methods of the + :ref:`scheduler ` are now optional. (:issue:`3559`) + +- HTTP/1.1 :exc:`~scrapy.core.downloader.handlers.http11.TunnelError` + exceptions now only truncate response bodies longer than 1000 characters, + instead of those longer than 32 characters, making it easier to debug such + errors. (:issue:`4881`, :issue:`5007`) + +- :class:`~scrapy.loader.ItemLoader` now supports non-text responses. + (:issue:`5145`, :issue:`5269`) + + +Bug fixes +~~~~~~~~~ + +- The :setting:`TWISTED_REACTOR` and :setting:`ASYNCIO_EVENT_LOOP` settings + are no longer ignored if defined in :attr:`~scrapy.Spider.custom_settings`. + (:issue:`4485`, :issue:`5352`) + +- Removed a module-level Twisted reactor import that could prevent + :ref:`using the asyncio reactor `. (:issue:`5357`) + +- The :command:`startproject` command works with existing folders again. + (:issue:`4665`, :issue:`4676`) + +- The :setting:`FEED_URI_PARAMS` setting now behaves as documented. + (:issue:`4962`, :issue:`4966`) + +- :attr:`Request.cb_kwargs ` once again allows the + ``callback`` keyword. (:issue:`5237`, :issue:`5251`, :issue:`5264`) + +- Made :func:`scrapy.utils.response.open_in_browser` support more complex + HTML. (:issue:`5319`, :issue:`5320`) + +- Fixed :attr:`CSVFeedSpider.quotechar + ` being interpreted as the CSV file + encoding. 
(:issue:`5391`, :issue:`5394`) + +- Added missing setuptools_ to the list of dependencies. (:issue:`5122`) + + .. _setuptools: https://pypi.org/project/setuptools/ + +- :class:`LinkExtractor ` + now also works as expected with links that have comma-separated ``rel`` + attribute values including ``nofollow``. (:issue:`5225`) + +- Fixed a :exc:`TypeError` that could be raised during :ref:`feed export + ` parameter parsing. (:issue:`5359`) + + +Documentation +~~~~~~~~~~~~~ + +- :ref:`asyncio support ` is no longer considered + experimental. (:issue:`5332`) + +- Included :ref:`Windows-specific help for asyncio usage `. + (:issue:`4976`, :issue:`5315`) + +- Rewrote :ref:`topics-headless-browsing` with up-to-date best practices. + (:issue:`4484`, :issue:`4613`) + +- Documented :ref:`local file naming in media pipelines + `. (:issue:`5069`, :issue:`5152`) + +- :ref:`faq` now covers spider file name collision issues. (:issue:`2680`, + :issue:`3669`) + +- Provided better context and instructions to disable the + :setting:`URLLENGTH_LIMIT` setting. (:issue:`5135`, :issue:`5250`) + +- Documented that :ref:`reppy-parser` does not support Python 3.9+. + (:issue:`5226`, :issue:`5231`) + +- Documented :ref:`the scheduler component `. + (:issue:`3537`, :issue:`3559`) + +- Documented the method used by :ref:`media pipelines + ` to :ref:`determine if a file has expired + `. (:issue:`5120`, :issue:`5254`) + +- :ref:`run-multiple-spiders` now features + :func:`scrapy.utils.project.get_project_settings` usage. (:issue:`5070`) + +- :ref:`run-multiple-spiders` now covers what happens when you define + different per-spider values for some settings that cannot differ at run + time. (:issue:`4485`, :issue:`5352`) + +- Extended the documentation of the + :class:`~scrapy.extensions.statsmailer.StatsMailer` extension. + (:issue:`5199`, :issue:`5217`) + +- Added :setting:`JOBDIR` to :ref:`topics-settings`. (:issue:`5173`, + :issue:`5224`) + +- Documented :attr:`Spider.attribute `. + (:issue:`5174`, :issue:`5244`) + +- Documented :attr:`TextResponse.urljoin `. + (:issue:`1582`) + +- Added the ``body_length`` parameter to the documented signature of the + :signal:`headers_received` signal. (:issue:`5270`) + +- Clarified :meth:`SelectorList.get ` usage + in the :ref:`tutorial `. (:issue:`5256`) + +- The documentation now features the shortest import path of classes with + multiple import paths. (:issue:`2733`, :issue:`5099`) + +- ``quotes.toscrape.com`` references now use HTTPS instead of HTTP. + (:issue:`5395`, :issue:`5396`) + +- Added a link to `our Discord server `_ + to :ref:`getting-help`. (:issue:`5421`, :issue:`5422`) + +- The pronunciation of the project name is now :ref:`officially + ` /ˈskreɪpaɪ/. (:issue:`5280`, :issue:`5281`) + +- Added the Scrapy logo to the README. (:issue:`5255`, :issue:`5258`) + +- Fixed issues and implemented minor improvements. (:issue:`3155`, + :issue:`4335`, :issue:`5074`, :issue:`5098`, :issue:`5134`, :issue:`5180`, + :issue:`5194`, :issue:`5239`, :issue:`5266`, :issue:`5271`, :issue:`5273`, + :issue:`5274`, :issue:`5276`, :issue:`5347`, :issue:`5356`, :issue:`5414`, + :issue:`5415`, :issue:`5416`, :issue:`5419`, :issue:`5420`) + + +Quality Assurance +~~~~~~~~~~~~~~~~~ + +- Added support for Python 3.10. (:issue:`5212`, :issue:`5221`, + :issue:`5265`) + +- Significantly reduced memory usage by + :func:`scrapy.utils.response.response_httprepr`, used by the + :class:`~scrapy.downloadermiddlewares.stats.DownloaderStats` downloader + middleware, which is enabled by default. 
+  (:issue:`4964`, :issue:`4972`)
+
+- Removed uses of the deprecated :mod:`optparse` module. (:issue:`5366`,
+  :issue:`5374`)
+
+- Extended typing hints. (:issue:`5077`, :issue:`5090`, :issue:`5100`,
+  :issue:`5108`, :issue:`5171`, :issue:`5215`, :issue:`5334`)
+
+- Improved tests, fixed CI issues, removed unused code. (:issue:`5094`,
+  :issue:`5157`, :issue:`5162`, :issue:`5198`, :issue:`5207`, :issue:`5208`,
+  :issue:`5229`, :issue:`5298`, :issue:`5299`, :issue:`5310`, :issue:`5316`,
+  :issue:`5333`, :issue:`5388`, :issue:`5389`, :issue:`5400`, :issue:`5401`,
+  :issue:`5404`, :issue:`5405`, :issue:`5407`, :issue:`5410`, :issue:`5412`,
+  :issue:`5425`, :issue:`5427`)
+
+- Implemented improvements for contributors. (:issue:`5080`, :issue:`5082`,
+  :issue:`5177`, :issue:`5200`)
+
+- Implemented cleanups. (:issue:`5095`, :issue:`5106`, :issue:`5209`,
+  :issue:`5228`, :issue:`5235`, :issue:`5245`, :issue:`5246`, :issue:`5292`,
+  :issue:`5314`, :issue:`5322`)
+
+
+.. _release-2.5.1:
+
+Scrapy 2.5.1 (2021-10-05)
+-------------------------
+
+* **Security bug fix:**
+
+  If you use
+  :class:`~scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware`
+  (i.e. the ``http_user`` and ``http_pass`` spider attributes) for HTTP
+  authentication, any request exposes your credentials to the request target.
+
+  To prevent authentication credentials from being exposed to unintended
+  domains, you must now set an additional spider attribute,
+  ``http_auth_domain``, and point it to the specific domain to which the
+  authentication credentials must be sent.
+
+  If the ``http_auth_domain`` spider attribute is not set, the domain of the
+  first request will be considered the HTTP authentication target, and
+  authentication credentials will only be sent in requests targeting that
+  domain.
+
+  If you need to send the same HTTP authentication credentials to multiple
+  domains, you can use :func:`w3lib.http.basic_auth_header` instead to
+  set the value of the ``Authorization`` header of your requests.
+
+  If you *really* want your spider to send the same HTTP authentication
+  credentials to any domain, set the ``http_auth_domain`` spider attribute
+  to ``None``.
+
+  Finally, if you are a user of `scrapy-splash`_, know that this version of
+  Scrapy breaks compatibility with scrapy-splash 0.7.2 and earlier. You will
+  need to upgrade scrapy-splash to a later version for it to continue to
+  work.
+
+.. _scrapy-splash: https://github.com/scrapy-plugins/scrapy-splash
+
+
+.. _release-2.5.0:
+
+Scrapy 2.5.0 (2021-04-06)
+-------------------------
+
+Highlights:
+
+- Official Python 3.9 support
+
+- Experimental :ref:`HTTP/2 support `
+
+- New :func:`~scrapy.downloadermiddlewares.retry.get_retry_request` function
+  to retry requests from spider callbacks
+
+- New :class:`~scrapy.signals.headers_received` signal that allows stopping
+  downloads early
+
+- New :class:`Response.protocol ` attribute
+
+Deprecation removals
+~~~~~~~~~~~~~~~~~~~~
+
+- Removed all code that :ref:`was deprecated in 1.7.0 <1.7-deprecations>` and
+  had not :ref:`already been removed in 2.4.0 <2.4-deprecation-removals>`.
+  (:issue:`4901`)
+
+- Removed support for the ``SCRAPY_PICKLED_SETTINGS_TO_OVERRIDE`` environment
+  variable, :ref:`deprecated in 1.8.0 <1.8-deprecations>`. (:issue:`4912`)
+
+
+Deprecations
+~~~~~~~~~~~~
+
+- The :mod:`scrapy.utils.py36` module is now deprecated in favor of
+  :mod:`scrapy.utils.asyncgen`.
+  (:issue:`4900`)
+
+
+New features
+~~~~~~~~~~~~
+
+- Experimental :ref:`HTTP/2 support ` through a new download handler
+  that can be assigned to the ``https`` protocol in the
+  :setting:`DOWNLOAD_HANDLERS` setting.
+  (:issue:`1854`, :issue:`4769`, :issue:`5058`, :issue:`5059`, :issue:`5066`)
+
+- The new :func:`scrapy.downloadermiddlewares.retry.get_retry_request`
+  function may be used from spider callbacks or middlewares to handle the
+  retrying of a request beyond the scenarios that
+  :class:`~scrapy.downloadermiddlewares.retry.RetryMiddleware` supports.
+  (:issue:`3590`, :issue:`3685`, :issue:`4902`)
+
+- The new :class:`~scrapy.signals.headers_received` signal gives early access
+  to response headers and allows :ref:`stopping downloads
+  `.
+  (:issue:`1772`, :issue:`4897`)
+
+- The new :attr:`Response.protocol `
+  attribute gives access to the string that identifies the protocol used to
+  download a response. (:issue:`4878`)
+
+- :ref:`Stats ` now include the following entries that indicate
+  the number of successes and failures in storing
+  :ref:`feeds `::
+
+      feedexport/success_count/<storage type>
+      feedexport/failed_count/<storage type>
+
+  Where ``<storage type>`` is the feed storage backend class name, such as
+  :class:`~scrapy.extensions.feedexport.FileFeedStorage` or
+  :class:`~scrapy.extensions.feedexport.FTPFeedStorage`.
+
+  (:issue:`3947`, :issue:`4850`)
+
+- The :class:`~scrapy.spidermiddlewares.urllength.UrlLengthMiddleware` spider
+  middleware now logs ignored URLs with ``INFO`` :ref:`logging level
+  ` instead of ``DEBUG``, and it now includes the following entry
+  into :ref:`stats ` to keep track of the number of ignored
+  URLs::
+
+      urllength/request_ignored_count
+
+  (:issue:`5036`)
+
+- The
+  :class:`~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware`
+  downloader middleware now logs the number of decompressed responses and the
+  total count of resulting bytes::
+
+      httpcompression/response_bytes
+      httpcompression/response_count
+
+  (:issue:`4797`, :issue:`4799`)
+
+
+Bug fixes
+~~~~~~~~~
+
+- Fixed installation on PyPy installing PyDispatcher in addition to
+  PyPyDispatcher, which could prevent Scrapy from working depending on which
+  package got imported. (:issue:`4710`, :issue:`4814`)
+
+- When inspecting a callback to check if it is a generator that also returns
+  a value, an exception is no longer raised if the callback has a docstring
+  with lower indentation than the following code.
+  (:issue:`4477`, :issue:`4935`)
+
+- The `Content-Length `_
+  header is no longer omitted from responses when using the default, HTTP/1.1
+  download handler (see :setting:`DOWNLOAD_HANDLERS`).
+  (:issue:`5009`, :issue:`5034`, :issue:`5045`, :issue:`5057`, :issue:`5062`)
+
+- Setting the :reqmeta:`handle_httpstatus_all` request meta key to ``False``
+  now has the same effect as not setting it at all, instead of having the
+  same effect as setting it to ``True``.
+  (:issue:`3851`, :issue:`4694`)
+
+
+Documentation
+~~~~~~~~~~~~~
+
+- Added instructions to :ref:`install Scrapy in Windows using pip
+  `.
+  (:issue:`4715`, :issue:`4736`)
+
+- Logging documentation now includes :ref:`additional ways to filter logs
+  `.
+  (:issue:`4216`, :issue:`4257`, :issue:`4965`)
+
+- Covered how to deal with long lists of allowed domains in the :ref:`FAQ
+  `. (:issue:`2263`, :issue:`3667`)
+
+- Covered scrapy-bench_ in :ref:`benchmarking`.
+  (:issue:`4996`, :issue:`5016`)
+
+- Clarified that one :ref:`extension ` instance is created
+  per crawler.
+  (:issue:`5014`)
+
+- Fixed some errors in examples.
+ (:issue:`4829`, :issue:`4830`, :issue:`4907`, :issue:`4909`, + :issue:`5008`) + +- Fixed some external links, typos, and so on. + (:issue:`4892`, :issue:`4899`, :issue:`4936`, :issue:`4942`, :issue:`5005`, + :issue:`5063`) + +- The :ref:`list of Request.meta keys ` is now sorted + alphabetically. + (:issue:`5061`, :issue:`5065`) + +- Updated references to Scrapinghub, which is now called Zyte. + (:issue:`4973`, :issue:`5072`) + +- Added a mention to contributors in the README. (:issue:`4956`) + +- Reduced the top margin of lists. (:issue:`4974`) + + +Quality Assurance +~~~~~~~~~~~~~~~~~ + +- Made Python 3.9 support official (:issue:`4757`, :issue:`4759`) + +- Extended typing hints (:issue:`4895`) + +- Fixed deprecated uses of the Twisted API. + (:issue:`4940`, :issue:`4950`, :issue:`5073`) + +- Made our tests run with the new pip resolver. + (:issue:`4710`, :issue:`4814`) + +- Added tests to ensure that :ref:`coroutine support ` + is tested. (:issue:`4987`) + +- Migrated from Travis CI to GitHub Actions. (:issue:`4924`) + +- Fixed CI issues. + (:issue:`4986`, :issue:`5020`, :issue:`5022`, :issue:`5027`, :issue:`5052`, + :issue:`5053`) + +- Implemented code refactorings, style fixes and cleanups. + (:issue:`4911`, :issue:`4982`, :issue:`5001`, :issue:`5002`, :issue:`5076`) + + +.. _release-2.4.1: + +Scrapy 2.4.1 (2020-11-17) +------------------------- + +- Fixed :ref:`feed exports ` overwrite support (:issue:`4845`, :issue:`4857`, :issue:`4859`) + +- Fixed the AsyncIO event loop handling, which could make code hang + (:issue:`4855`, :issue:`4872`) + +- Fixed the IPv6-capable DNS resolver + :class:`~scrapy.resolver.CachingHostnameResolver` for download handlers + that call + :meth:`reactor.resolve ` + (:issue:`4802`, :issue:`4803`) + +- Fixed the output of the :command:`genspider` command showing placeholders + instead of the import path of the generated spider module (:issue:`4874`) + +- Migrated Windows CI from Azure Pipelines to GitHub Actions (:issue:`4869`, + :issue:`4876`) + + +.. _release-2.4.0: + +Scrapy 2.4.0 (2020-10-11) +------------------------- + +Highlights: + +* Python 3.5 support has been dropped. + +* The ``file_path`` method of :ref:`media pipelines ` + can now access the source :ref:`item `. + + This allows you to set a download file path based on item data. + +* The new ``item_export_kwargs`` key of the :setting:`FEEDS` setting allows + to define keyword parameters to pass to :ref:`item exporter classes + ` + +* You can now choose whether :ref:`feed exports ` + overwrite or append to the output file. + + For example, when using the :command:`crawl` or :command:`runspider` + commands, you can use the ``-O`` option instead of ``-o`` to overwrite the + output file. + +* Zstd-compressed responses are now supported if zstandard_ is installed. + +* In settings, where the import path of a class is required, it is now + possible to pass a class object instead. 
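+
+  For example, a project's ``settings.py`` may now reference the class object
+  directly (a minimal sketch; ``myproject.middlewares.MyDownloaderMiddleware``
+  is a hypothetical middleware used only for illustration)::
+
+      # settings.py (illustrative sketch)
+      from myproject.middlewares import MyDownloaderMiddleware
+
+      DOWNLOADER_MIDDLEWARES = {
+          # Previously this key had to be the import path string
+          # "myproject.middlewares.MyDownloaderMiddleware".
+          MyDownloaderMiddleware: 543,
+      }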
+ +Modified requirements +~~~~~~~~~~~~~~~~~~~~~ + +* Python 3.6 or greater is now required; support for Python 3.5 has been + dropped + + As a result: + + - When using PyPy, PyPy 7.2.0 or greater :ref:`is now required + ` + + - For Amazon S3 storage support in :ref:`feed exports + ` or :ref:`media pipelines + `, botocore_ 1.4.87 or greater is now required + + - To use the :ref:`images pipeline `, Pillow_ 4.0.0 or + greater is now required + + (:issue:`4718`, :issue:`4732`, :issue:`4733`, :issue:`4742`, :issue:`4743`, + :issue:`4764`) + + +Backward-incompatible changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* :class:`~scrapy.downloadermiddlewares.cookies.CookiesMiddleware` once again + discards cookies defined in :attr:`Request.headers + `. + + We decided to revert this bug fix, introduced in Scrapy 2.2.0, because it + was reported that the current implementation could break existing code. + + If you need to set cookies for a request, use the :class:`Request.cookies + ` parameter. + + A future version of Scrapy will include a new, better implementation of the + reverted bug fix. + + (:issue:`4717`, :issue:`4823`) + + +.. _2.4-deprecation-removals: + +Deprecation removals +~~~~~~~~~~~~~~~~~~~~ + +* :class:`scrapy.extensions.feedexport.S3FeedStorage` no longer reads the + values of ``access_key`` and ``secret_key`` from the running project + settings when they are not passed to its ``__init__`` method; you must + either pass those parameters to its ``__init__`` method or use + :class:`S3FeedStorage.from_crawler + ` + (:issue:`4356`, :issue:`4411`, :issue:`4688`) + +* :attr:`Rule.process_request ` + no longer admits callables which expect a single ``request`` parameter, + rather than both ``request`` and ``response`` (:issue:`4818`) + + +Deprecations +~~~~~~~~~~~~ + +* In custom :ref:`media pipelines `, signatures that + do not accept a keyword-only ``item`` parameter in any of the methods that + :ref:`now support this parameter ` are now + deprecated (:issue:`4628`, :issue:`4686`) + +* In custom :ref:`feed storage backend classes `, + ``__init__`` method signatures that do not accept a keyword-only + ``feed_options`` parameter are now deprecated (:issue:`547`, :issue:`716`, + :issue:`4512`) + +* The :class:`scrapy.utils.python.WeakKeyCache` class is now deprecated + (:issue:`4684`, :issue:`4701`) + +* The :func:`scrapy.utils.boto.is_botocore` function is now deprecated, use + :func:`scrapy.utils.boto.is_botocore_available` instead (:issue:`4734`, + :issue:`4776`) + + +New features +~~~~~~~~~~~~ + +.. 
_media-pipeline-item-parameter: + +* The following methods of :ref:`media pipelines ` now + accept an ``item`` keyword-only parameter containing the source + :ref:`item `: + + - In :class:`scrapy.pipelines.files.FilesPipeline`: + + - :meth:`~scrapy.pipelines.files.FilesPipeline.file_downloaded` + + - :meth:`~scrapy.pipelines.files.FilesPipeline.file_path` + + - :meth:`~scrapy.pipelines.files.FilesPipeline.media_downloaded` + + - :meth:`~scrapy.pipelines.files.FilesPipeline.media_to_download` + + - In :class:`scrapy.pipelines.images.ImagesPipeline`: + + - :meth:`~scrapy.pipelines.images.ImagesPipeline.file_downloaded` + + - :meth:`~scrapy.pipelines.images.ImagesPipeline.file_path` + + - :meth:`~scrapy.pipelines.images.ImagesPipeline.get_images` + + - :meth:`~scrapy.pipelines.images.ImagesPipeline.image_downloaded` + + - :meth:`~scrapy.pipelines.images.ImagesPipeline.media_downloaded` + + - :meth:`~scrapy.pipelines.images.ImagesPipeline.media_to_download` + + (:issue:`4628`, :issue:`4686`) + +* The new ``item_export_kwargs`` key of the :setting:`FEEDS` setting allows + to define keyword parameters to pass to :ref:`item exporter classes + ` (:issue:`4606`, :issue:`4768`) + +* :ref:`Feed exports ` gained overwrite support: + + * When using the :command:`crawl` or :command:`runspider` commands, you + can use the ``-O`` option instead of ``-o`` to overwrite the output + file + + * You can use the ``overwrite`` key in the :setting:`FEEDS` setting to + configure whether to overwrite the output file (``True``) or append to + its content (``False``) + + * The ``__init__`` and ``from_crawler`` methods of :ref:`feed storage + backend classes ` now receive a new keyword-only + parameter, ``feed_options``, which is a dictionary of :ref:`feed + options ` + + (:issue:`547`, :issue:`716`, :issue:`4512`) + +* Zstd-compressed responses are now supported if zstandard_ is installed + (:issue:`4831`) + +* In settings, where the import path of a class is required, it is now + possible to pass a class object instead (:issue:`3870`, :issue:`3873`). + + This includes also settings where only part of its value is made of an + import path, such as :setting:`DOWNLOADER_MIDDLEWARES` or + :setting:`DOWNLOAD_HANDLERS`. + +* :ref:`Downloader middlewares ` can now + override :class:`response.request `. 
+ + If a :ref:`downloader middleware ` returns + a :class:`~scrapy.http.Response` object from + :meth:`~scrapy.downloadermiddlewares.DownloaderMiddleware.process_response` + or + :meth:`~scrapy.downloadermiddlewares.DownloaderMiddleware.process_exception` + with a custom :class:`~scrapy.http.Request` object assigned to + :class:`response.request `: + + - The response is handled by the callback of that custom + :class:`~scrapy.http.Request` object, instead of being handled by the + callback of the original :class:`~scrapy.http.Request` object + + - That custom :class:`~scrapy.http.Request` object is now sent as the + ``request`` argument to the :signal:`response_received` signal, instead + of the original :class:`~scrapy.http.Request` object + + (:issue:`4529`, :issue:`4632`) + +* When using the :ref:`FTP feed storage backend `: + + - It is now possible to set the new ``overwrite`` :ref:`feed option + ` to ``False`` to append to an existing file instead of + overwriting it + + - The FTP password can now be omitted if it is not necessary + + (:issue:`547`, :issue:`716`, :issue:`4512`) + +* The ``__init__`` method of :class:`~scrapy.exporters.CsvItemExporter` now + supports an ``errors`` parameter to indicate how to handle encoding errors + (:issue:`4755`) + +* When :ref:`using asyncio `, it is now possible to + :ref:`set a custom asyncio loop ` (:issue:`4306`, + :issue:`4414`) + +* Serialized requests (see :ref:`topics-jobs`) now support callbacks that are + spider methods that delegate on other callable (:issue:`4756`) + +* When a response is larger than :setting:`DOWNLOAD_MAXSIZE`, the logged + message is now a warning, instead of an error (:issue:`3874`, + :issue:`3886`, :issue:`4752`) + + +Bug fixes +~~~~~~~~~ + +* The :command:`genspider` command no longer overwrites existing files + unless the ``--force`` option is used (:issue:`4561`, :issue:`4616`, + :issue:`4623`) + +* Cookies with an empty value are no longer considered invalid cookies + (:issue:`4772`) + +* The :command:`runspider` command now supports files with the ``.pyw`` file + extension (:issue:`4643`, :issue:`4646`) + +* The :class:`~scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware` + middleware now simply ignores unsupported proxy values (:issue:`3331`, + :issue:`4778`) + +* Checks for generator callbacks with a ``return`` statement no longer warn + about ``return`` statements in nested functions (:issue:`4720`, + :issue:`4721`) + +* The system file mode creation mask no longer affects the permissions of + files generated using the :command:`startproject` command (:issue:`4722`) + +* :func:`scrapy.utils.iterators.xmliter` now supports namespaced node names + (:issue:`861`, :issue:`4746`) + +* :class:`~scrapy.Request` objects can now have ``about:`` URLs, which can + work when using a headless browser (:issue:`4835`) + + +Documentation +~~~~~~~~~~~~~ + +* The :setting:`FEED_URI_PARAMS` setting is now documented (:issue:`4671`, + :issue:`4724`) + +* Improved the documentation of + :ref:`link extractors ` with an usage example from + a spider callback and reference documentation for the + :class:`~scrapy.link.Link` class (:issue:`4751`, :issue:`4775`) + +* Clarified the impact of :setting:`CONCURRENT_REQUESTS` when using the + :class:`~scrapy.extensions.closespider.CloseSpider` extension + (:issue:`4836`) + +* Removed references to Python 2’s ``unicode`` type (:issue:`4547`, + :issue:`4703`) + +* We now have an :ref:`official deprecation policy ` + (:issue:`4705`) + +* Our :ref:`documentation policies ` now cover 
+  usage of Sphinx’s :rst:dir:`versionadded` and :rst:dir:`versionchanged`
+  directives, and we have removed usages referencing Scrapy 1.4.0 and earlier
+  versions (:issue:`3971`, :issue:`4310`)
+
+* Other documentation cleanups (:issue:`4090`, :issue:`4782`, :issue:`4800`,
+  :issue:`4801`, :issue:`4809`, :issue:`4816`, :issue:`4825`)
+
+
+Quality assurance
+~~~~~~~~~~~~~~~~~
+
+* Extended typing hints (:issue:`4243`, :issue:`4691`)
+
+* Added tests for the :command:`check` command (:issue:`4663`)
+
+* Fixed test failures on Debian (:issue:`4726`, :issue:`4727`, :issue:`4735`)
+
+* Improved Windows test coverage (:issue:`4723`)
+
+* Switched to :ref:`formatted string literals ` where possible
+  (:issue:`4307`, :issue:`4324`, :issue:`4672`)
+
+* Modernized :func:`super` usage (:issue:`4707`)
+
+* Other code and test cleanups (:issue:`1790`, :issue:`3288`, :issue:`4165`,
+  :issue:`4564`, :issue:`4651`, :issue:`4714`, :issue:`4738`, :issue:`4745`,
+  :issue:`4747`, :issue:`4761`, :issue:`4765`, :issue:`4804`, :issue:`4817`,
+  :issue:`4820`, :issue:`4822`, :issue:`4839`)
+
+
+.. _release-2.3.0:
+
+Scrapy 2.3.0 (2020-08-04)
+-------------------------
+
+Highlights:
+
+* :ref:`Feed exports ` now support :ref:`Google Cloud
+  Storage ` as a storage backend
+
+* The new :setting:`FEED_EXPORT_BATCH_ITEM_COUNT` setting allows delivering
+  output items in batches of up to the specified number of items.
+
+  It also serves as a workaround for :ref:`delayed file delivery
+  `, which causes Scrapy to only start item delivery
+  after the crawl has finished when using certain storage backends
+  (:ref:`S3 `, :ref:`FTP `,
+  and now :ref:`GCS `).
+
+* The base implementation of :ref:`item loaders ` has been
+  moved into a separate library, :doc:`itemloaders `,
+  allowing usage from outside Scrapy and a separate release schedule
+
+Deprecation removals
+~~~~~~~~~~~~~~~~~~~~
+
+* Removed the following classes and their parent modules from
+  ``scrapy.linkextractors``:
+
+  * ``htmlparser.HtmlParserLinkExtractor``
+  * ``regex.RegexLinkExtractor``
+  * ``sgml.BaseSgmlLinkExtractor``
+  * ``sgml.SgmlLinkExtractor``
+
+  Use
+  :class:`LinkExtractor `
+  instead (:issue:`4356`, :issue:`4679`)
+
+
+Deprecations
+~~~~~~~~~~~~
+
+* The ``scrapy.utils.python.retry_on_eintr`` function is now deprecated
+  (:issue:`4683`)
+
+
+New features
+~~~~~~~~~~~~
+
+* :ref:`Feed exports ` support :ref:`Google Cloud
+  Storage ` (:issue:`685`, :issue:`3608`)
+
+* New :setting:`FEED_EXPORT_BATCH_ITEM_COUNT` setting for batch deliveries
+  (:issue:`4250`, :issue:`4434`)
+
+* The :command:`parse` command now allows specifying an output file
+  (:issue:`4317`, :issue:`4377`)
+
+* :meth:`Request.from_curl ` and
+  :func:`~scrapy.utils.curl.curl_to_request_kwargs` now also support
+  ``--data-raw`` (:issue:`4612`)
+
+* A ``parse`` callback may now be used in built-in spider subclasses, such
+  as :class:`~scrapy.spiders.CrawlSpider` (:issue:`712`, :issue:`732`,
+  :issue:`781`, :issue:`4254`)
+
+
+Bug fixes
+~~~~~~~~~
+
+* Fixed the :ref:`CSV exporting ` of
+  :ref:`dataclass items ` and :ref:`attr.s items
+  ` (:issue:`4667`, :issue:`4668`)
+
+* :meth:`Request.from_curl ` and
+  :func:`~scrapy.utils.curl.curl_to_request_kwargs` now set the request
+  method to ``POST`` when a request body is specified and no request method
+  is specified (:issue:`4612`)
+
+* The processing of ANSI escape sequences is enabled in Windows 10.0.14393
+  and later, where it is required for colored output (:issue:`4393`,
+  :issue:`4403`)
+
+
+Documentation
+~~~~~~~~~~~~~
+
+* Updated the `OpenSSL cipher list format`_ link in the documentation about
+  the :setting:`DOWNLOADER_CLIENT_TLS_CIPHERS` setting (:issue:`4653`)
+
+* Simplified the code example in :ref:`topics-loaders-dataclass`
+  (:issue:`4652`)
+
+.. _OpenSSL cipher list format: https://www.openssl.org/docs/manmaster/man1/openssl-ciphers.html#CIPHER-LIST-FORMAT
+
+
+Quality assurance
+~~~~~~~~~~~~~~~~~
+
+* The base implementation of :ref:`item loaders ` has been
+  moved into :doc:`itemloaders ` (:issue:`4005`,
+  :issue:`4516`)
+
+* Fixed a silenced error in some scheduler tests (:issue:`4644`,
+  :issue:`4645`)
+
+* Renewed the localhost certificate used for SSL tests (:issue:`4650`)
+
+* Removed cookie-handling code specific to Python 2 (:issue:`4682`)
+
+* Stopped using Python 2 unicode literal syntax (:issue:`4704`)
+
+* Stopped using a backslash for line continuation (:issue:`4673`)
+
+* Removed unneeded entries from the MyPy exception list (:issue:`4690`)
+
+* Automated tests now pass on Windows as part of our continuous integration
+  system (:issue:`4458`)
+
+* Automated tests now pass on the latest PyPy version for supported Python
+  versions in our continuous integration system (:issue:`4504`)
+
+
+.. _release-2.2.1:
+
+Scrapy 2.2.1 (2020-07-17)
+-------------------------
+
+* The :command:`startproject` command no longer makes unintended changes to
+  the permissions of files in the destination folder, such as removing
+  execution permissions (:issue:`4662`, :issue:`4666`)
+
+
+.. _release-2.2.0:
+
+Scrapy 2.2.0 (2020-06-24)
+-------------------------
+
+Highlights:
+
+* Python 3.5.2+ is required now
+* :ref:`dataclass objects ` and
+  :ref:`attrs objects ` are now valid :ref:`item types
+  `
+* New :meth:`TextResponse.json ` method
+* New :signal:`bytes_received` signal that allows canceling response download
+* :class:`~scrapy.downloadermiddlewares.cookies.CookiesMiddleware` fixes
+
+Backward-incompatible changes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* Support for Python 3.5.0 and 3.5.1 has been dropped; Scrapy now refuses to
+  run with a Python version lower than 3.5.2, which introduced
+  :class:`typing.Type` (:issue:`4615`)
+
+
+Deprecations
+~~~~~~~~~~~~
+
+* :meth:`TextResponse.body_as_unicode
+  ` is now deprecated, use
+  :attr:`TextResponse.text ` instead
+  (:issue:`4546`, :issue:`4555`, :issue:`4579`)
+
+* :class:`scrapy.item.BaseItem` is now deprecated, use
+  :class:`scrapy.item.Item` instead (:issue:`4534`)
+
+
+New features
+~~~~~~~~~~~~
+
+* :ref:`dataclass objects ` and
+  :ref:`attrs objects ` are now valid :ref:`item types
+  `, and a new itemadapter_ library makes it easy to
+  write code that :ref:`supports any item type `
+  (:issue:`2749`, :issue:`2807`, :issue:`3761`, :issue:`3881`, :issue:`4642`)
+
+* A new :meth:`TextResponse.json ` method
+  allows deserializing JSON responses (:issue:`2444`, :issue:`4460`,
+  :issue:`4574`)
+
+* A new :signal:`bytes_received` signal allows monitoring response download
+  progress and :ref:`stopping downloads `
+  (:issue:`4205`, :issue:`4559`)
+
+* The dictionaries in the result list of a :ref:`media pipeline
+  ` now include a new key, ``status``, which indicates
+  if the file was downloaded or, if the file was not downloaded, why it was
+  not downloaded; see :meth:`FilesPipeline.get_media_requests
+  ` for more
+  information (:issue:`2893`, :issue:`4486`)
+
+* When using :ref:`Google Cloud Storage ` for
+  a :ref:`media pipeline `, a warning is now logged if
+  the configured credentials do not grant the required permissions
+  (:issue:`4346`,
:issue:`4508`) + +* :ref:`Link extractors ` are now serializable, + as long as you do not use :ref:`lambdas ` for parameters; for + example, you can now pass link extractors in :attr:`Request.cb_kwargs + ` or + :attr:`Request.meta ` when :ref:`persisting + scheduled requests ` (:issue:`4554`) + +* Upgraded the :ref:`pickle protocol ` that Scrapy uses + from protocol 2 to protocol 4, improving serialization capabilities and + performance (:issue:`4135`, :issue:`4541`) + +* :func:`scrapy.utils.misc.create_instance` now raises a :exc:`TypeError` + exception if the resulting instance is ``None`` (:issue:`4528`, + :issue:`4532`) + +.. _itemadapter: https://github.com/scrapy/itemadapter + + +Bug fixes +~~~~~~~~~ + +* :class:`~scrapy.downloadermiddlewares.cookies.CookiesMiddleware` no longer + discards cookies defined in :attr:`Request.headers + ` (:issue:`1992`, :issue:`2400`) + +* :class:`~scrapy.downloadermiddlewares.cookies.CookiesMiddleware` no longer + re-encodes cookies defined as :class:`bytes` in the ``cookies`` parameter + of the ``__init__`` method of :class:`~scrapy.http.Request` + (:issue:`2400`, :issue:`3575`) + +* When :setting:`FEEDS` defines multiple URIs, :setting:`FEED_STORE_EMPTY` is + ``False`` and the crawl yields no items, Scrapy no longer stops feed + exports after the first URI (:issue:`4621`, :issue:`4626`) + +* :class:`~scrapy.spiders.Spider` callbacks defined using :doc:`coroutine + syntax ` no longer need to return an iterable, and may + instead return a :class:`~scrapy.http.Request` object, an + :ref:`item `, or ``None`` (:issue:`4609`) + +* The :command:`startproject` command now ensures that the generated project + folders and files have the right permissions (:issue:`4604`) + +* Fix a :exc:`KeyError` exception being sometimes raised from + :class:`scrapy.utils.datatypes.LocalWeakReferencedCache` (:issue:`4597`, + :issue:`4599`) + +* When :setting:`FEEDS` defines multiple URIs, log messages about items being + stored now contain information from the corresponding feed, instead of + always containing information about only one of the feeds (:issue:`4619`, + :issue:`4629`) + + +Documentation +~~~~~~~~~~~~~ + +* Added a new section about :ref:`accessing cb_kwargs from errbacks + ` (:issue:`4598`, :issue:`4634`) + +* Covered chompjs_ in :ref:`topics-parsing-javascript` (:issue:`4556`, + :issue:`4562`) + +* Removed from :doc:`topics/coroutines` the warning about the API being + experimental (:issue:`4511`, :issue:`4513`) + +* Removed references to unsupported versions of :doc:`Twisted + ` (:issue:`4533`) + +* Updated the description of the :ref:`screenshot pipeline example + `, which now uses :doc:`coroutine syntax + ` instead of returning a + :class:`~twisted.internet.defer.Deferred` (:issue:`4514`, :issue:`4593`) + +* Removed a misleading import line from the + :func:`scrapy.utils.log.configure_logging` code example (:issue:`4510`, + :issue:`4587`) + +* The display-on-hover behavior of internal documentation references now also + covers links to :ref:`commands `, :attr:`Request.meta + ` keys, :ref:`settings ` and + :ref:`signals ` (:issue:`4495`, :issue:`4563`) + +* It is again possible to download the documentation for offline reading + (:issue:`4578`, :issue:`4585`) + +* Removed backslashes preceding ``*args`` and ``**kwargs`` in some function + and method signatures (:issue:`4592`, :issue:`4596`) + +.. 
_chompjs: https://github.com/Nykakin/chompjs + + +Quality assurance +~~~~~~~~~~~~~~~~~ + +* Adjusted the code base further to our :ref:`style guidelines + ` (:issue:`4237`, :issue:`4525`, :issue:`4538`, + :issue:`4539`, :issue:`4540`, :issue:`4542`, :issue:`4543`, :issue:`4544`, + :issue:`4545`, :issue:`4557`, :issue:`4558`, :issue:`4566`, :issue:`4568`, + :issue:`4572`) + +* Removed remnants of Python 2 support (:issue:`4550`, :issue:`4553`, + :issue:`4568`) + +* Improved code sharing between the :command:`crawl` and :command:`runspider` + commands (:issue:`4548`, :issue:`4552`) + +* Replaced ``chain(*iterable)`` with ``chain.from_iterable(iterable)`` + (:issue:`4635`) + +* You may now run the :mod:`asyncio` tests with Tox on any Python version + (:issue:`4521`) + +* Updated test requirements to reflect an incompatibility with pytest 5.4 and + 5.4.1 (:issue:`4588`) + +* Improved :class:`~scrapy.spiderloader.SpiderLoader` test coverage for + scenarios involving duplicate spider names (:issue:`4549`, :issue:`4560`) + +* Configured Travis CI to also run the tests with Python 3.5.2 + (:issue:`4518`, :issue:`4615`) + +* Added a `Pylint `_ job to Travis CI + (:issue:`3727`) + +* Added a `Mypy `_ job to Travis CI (:issue:`4637`) + +* Made use of set literals in tests (:issue:`4573`) + +* Cleaned up the Travis CI configuration (:issue:`4517`, :issue:`4519`, + :issue:`4522`, :issue:`4537`) + + +.. _release-2.1.0: + +Scrapy 2.1.0 (2020-04-24) +------------------------- + +Highlights: + +* New :setting:`FEEDS` setting to export to multiple feeds +* New :attr:`Response.ip_address ` attribute + +Backward-incompatible changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* :exc:`AssertionError` exceptions triggered by :ref:`assert ` + statements have been replaced by new exception types, to support running + Python in optimized mode (see :option:`-O`) without changing Scrapy’s + behavior in any unexpected ways. + + If you catch an :exc:`AssertionError` exception from Scrapy, update your + code to catch the corresponding new exception. 
+ + (:issue:`4440`) + + +Deprecation removals +~~~~~~~~~~~~~~~~~~~~ + +* The ``LOG_UNSERIALIZABLE_REQUESTS`` setting is no longer supported, use + :setting:`SCHEDULER_DEBUG` instead (:issue:`4385`) + +* The ``REDIRECT_MAX_METAREFRESH_DELAY`` setting is no longer supported, use + :setting:`METAREFRESH_MAXDELAY` instead (:issue:`4385`) + +* The :class:`~scrapy.downloadermiddlewares.chunked.ChunkedTransferMiddleware` + middleware has been removed, including the entire + :class:`scrapy.downloadermiddlewares.chunked` module; chunked transfers + work out of the box (:issue:`4431`) + +* The ``spiders`` property has been removed from + :class:`~scrapy.crawler.Crawler`, use :class:`CrawlerRunner.spider_loader + ` or instantiate + :setting:`SPIDER_LOADER_CLASS` with your settings instead (:issue:`4398`) + +* The ``MultiValueDict``, ``MultiValueDictKeyError``, and ``SiteNode`` + classes have been removed from :mod:`scrapy.utils.datatypes` + (:issue:`4400`) + + +Deprecations +~~~~~~~~~~~~ + +* The ``FEED_FORMAT`` and ``FEED_URI`` settings have been deprecated in + favor of the new :setting:`FEEDS` setting (:issue:`1336`, :issue:`3858`, + :issue:`4507`) + + +New features +~~~~~~~~~~~~ + +* A new setting, :setting:`FEEDS`, allows configuring multiple output feeds + with different settings each (:issue:`1336`, :issue:`3858`, :issue:`4507`) + +* The :command:`crawl` and :command:`runspider` commands now support multiple + ``-o`` parameters (:issue:`1336`, :issue:`3858`, :issue:`4507`) + +* The :command:`crawl` and :command:`runspider` commands now support + specifying an output format by appending ``:`` to the output file + (:issue:`1336`, :issue:`3858`, :issue:`4507`) + +* The new :attr:`Response.ip_address ` + attribute gives access to the IP address that originated a response + (:issue:`3903`, :issue:`3940`) + +* A warning is now issued when a value in + :attr:`~scrapy.spiders.Spider.allowed_domains` includes a port + (:issue:`50`, :issue:`3198`, :issue:`4413`) + +* Zsh completion now excludes used option aliases from the completion list + (:issue:`4438`) + + +Bug fixes +~~~~~~~~~ + +* :ref:`Request serialization ` no longer breaks for + callbacks that are spider attributes which are assigned a function with a + different name (:issue:`4500`) + +* ``None`` values in :attr:`~scrapy.spiders.Spider.allowed_domains` no longer + cause a :exc:`TypeError` exception (:issue:`4410`) + +* Zsh completion no longer allows options after arguments (:issue:`4438`) + +* zope.interface 5.0.0 and later versions are now supported + (:issue:`4447`, :issue:`4448`) + +* ``Spider.make_requests_from_url``, deprecated in Scrapy 1.4.0, now issues a + warning when used (:issue:`4412`) + + +Documentation +~~~~~~~~~~~~~ + +* Improved the documentation about signals that allow their handlers to + return a :class:`~twisted.internet.defer.Deferred` (:issue:`4295`, + :issue:`4390`) + +* Our PyPI entry now includes links for our documentation, our source code + repository and our issue tracker (:issue:`4456`) + +* Covered the `curl2scrapy `_ + service in the documentation (:issue:`4206`, :issue:`4455`) + +* Removed references to the Guppy library, which only works in Python 2 + (:issue:`4285`, :issue:`4343`) + +* Extended use of InterSphinx to link to Python 3 documentation + (:issue:`4444`, :issue:`4445`) + +* Added support for Sphinx 3.0 and later (:issue:`4475`, :issue:`4480`, + :issue:`4496`, :issue:`4503`) + + +Quality assurance +~~~~~~~~~~~~~~~~~ + +* Removed warnings about using old, removed settings (:issue:`4404`) + +* Removed 
a warning about importing + :class:`~twisted.internet.testing.StringTransport` from + ``twisted.test.proto_helpers`` in Twisted 19.7.0 or newer (:issue:`4409`) + +* Removed outdated Debian package build files (:issue:`4384`) + +* Removed :class:`object` usage as a base class (:issue:`4430`) + +* Removed code that added support for old versions of Twisted that we no + longer support (:issue:`4472`) + +* Fixed code style issues (:issue:`4468`, :issue:`4469`, :issue:`4471`, + :issue:`4481`) + +* Removed :func:`twisted.internet.defer.returnValue` calls (:issue:`4443`, + :issue:`4446`, :issue:`4489`) + + +.. _release-2.0.1: + +Scrapy 2.0.1 (2020-03-18) +------------------------- + +* :meth:`Response.follow_all ` now supports + an empty URL iterable as input (:issue:`4408`, :issue:`4420`) + +* Removed top-level :mod:`~twisted.internet.reactor` imports to prevent + errors about the wrong Twisted reactor being installed when setting a + different Twisted reactor using :setting:`TWISTED_REACTOR` (:issue:`4401`, + :issue:`4406`) + +* Fixed tests (:issue:`4422`) + + +.. _release-2.0.0: + +Scrapy 2.0.0 (2020-03-03) +------------------------- + +Highlights: + +* Python 2 support has been removed +* :doc:`Partial ` :ref:`coroutine syntax ` support + and :doc:`experimental ` :mod:`asyncio` support +* New :meth:`Response.follow_all ` method +* :ref:`FTP support ` for media pipelines +* New :attr:`Response.certificate ` + attribute +* IPv6 support through :setting:`DNS_RESOLVER` + +Backward-incompatible changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* Python 2 support has been removed, following `Python 2 end-of-life on + January 1, 2020`_ (:issue:`4091`, :issue:`4114`, :issue:`4115`, + :issue:`4121`, :issue:`4138`, :issue:`4231`, :issue:`4242`, :issue:`4304`, + :issue:`4309`, :issue:`4373`) + +* Retry gaveups (see :setting:`RETRY_TIMES`) are now logged as errors instead + of as debug information (:issue:`3171`, :issue:`3566`) + +* File extensions that + :class:`LinkExtractor ` + ignores by default now also include ``7z``, ``7zip``, ``apk``, ``bz2``, + ``cdr``, ``dmg``, ``ico``, ``iso``, ``tar``, ``tar.gz``, ``webm``, and + ``xz`` (:issue:`1837`, :issue:`2067`, :issue:`4066`) + +* The :setting:`METAREFRESH_IGNORE_TAGS` setting is now an empty list by + default, following web browser behavior (:issue:`3844`, :issue:`4311`) + +* The + :class:`~scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware` + now includes spaces after commas in the value of the ``Accept-Encoding`` + header that it sets, following web browser behavior (:issue:`4293`) + +* The ``__init__`` method of custom download handlers (see + :setting:`DOWNLOAD_HANDLERS`) or subclasses of the following downloader + handlers no longer receives a ``settings`` parameter: + + * :class:`scrapy.core.downloader.handlers.datauri.DataURIDownloadHandler` + + * :class:`scrapy.core.downloader.handlers.file.FileDownloadHandler` + + Use the ``from_settings`` or ``from_crawler`` class methods to expose such + a parameter to your custom download handlers. + + (:issue:`4126`) + +* We have refactored the :class:`scrapy.core.scheduler.Scheduler` class and + related queue classes (see :setting:`SCHEDULER_PRIORITY_QUEUE`, + :setting:`SCHEDULER_DISK_QUEUE` and :setting:`SCHEDULER_MEMORY_QUEUE`) to + make it easier to implement custom scheduler queue classes. See + :ref:`2-0-0-scheduler-queue-changes` below for details. + +* Overridden settings are now logged in a different format. 
This is more in + line with similar information logged at startup (:issue:`4199`) + +.. _Python 2 end-of-life on January 1, 2020: https://www.python.org/doc/sunset-python-2/ + + +Deprecation removals +~~~~~~~~~~~~~~~~~~~~ + +* The :ref:`Scrapy shell ` no longer provides a `sel` proxy + object, use :meth:`response.selector ` + instead (:issue:`4347`) + +* LevelDB support has been removed (:issue:`4112`) + +* The following functions have been removed from :mod:`scrapy.utils.python`: + ``isbinarytext``, ``is_writable``, ``setattr_default``, ``stringify_dict`` + (:issue:`4362`) + + +Deprecations +~~~~~~~~~~~~ + +* Using environment variables prefixed with ``SCRAPY_`` to override settings + is deprecated (:issue:`4300`, :issue:`4374`, :issue:`4375`) + +* :class:`scrapy.linkextractors.FilteringLinkExtractor` is deprecated, use + :class:`scrapy.linkextractors.LinkExtractor + ` instead (:issue:`4045`) + +* The ``noconnect`` query string argument of proxy URLs is deprecated and + should be removed from proxy URLs (:issue:`4198`) + +* The :meth:`next ` method of + :class:`scrapy.utils.python.MutableChain` is deprecated, use the global + :func:`next` function or :meth:`MutableChain.__next__ + ` instead (:issue:`4153`) + + +New features +~~~~~~~~~~~~ + +* Added :doc:`partial support ` for Python’s + :ref:`coroutine syntax ` and :doc:`experimental support + ` for :mod:`asyncio` and :mod:`asyncio`-powered libraries + (:issue:`4010`, :issue:`4259`, :issue:`4269`, :issue:`4270`, :issue:`4271`, + :issue:`4316`, :issue:`4318`) + +* The new :meth:`Response.follow_all ` + method offers the same functionality as + :meth:`Response.follow ` but supports an + iterable of URLs as input and returns an iterable of requests + (:issue:`2582`, :issue:`4057`, :issue:`4286`) + +* :ref:`Media pipelines ` now support :ref:`FTP + storage ` (:issue:`3928`, :issue:`3961`) + +* The new :attr:`Response.certificate ` + attribute exposes the SSL certificate of the server as a + :class:`twisted.internet.ssl.Certificate` object for HTTPS responses + (:issue:`2726`, :issue:`4054`) + +* A new :setting:`DNS_RESOLVER` setting allows enabling IPv6 support + (:issue:`1031`, :issue:`4227`) + +* A new :setting:`SCRAPER_SLOT_MAX_ACTIVE_SIZE` setting allows configuring + the existing soft limit that pauses request downloads when the total + response data being processed is too high (:issue:`1410`, :issue:`3551`) + +* A new :setting:`TWISTED_REACTOR` setting allows customizing the + :mod:`~twisted.internet.reactor` that Scrapy uses, allowing to + :doc:`enable asyncio support ` or deal with a + :ref:`common macOS issue ` (:issue:`2905`, + :issue:`4294`) + +* Scheduler disk and memory queues may now use the class methods + ``from_crawler`` or ``from_settings`` (:issue:`3884`) + +* The new :attr:`Response.cb_kwargs ` + attribute serves as a shortcut for :attr:`Response.request.cb_kwargs + ` (:issue:`4331`) + +* :meth:`Response.follow ` now supports a + ``flags`` parameter, for consistency with :class:`~scrapy.http.Request` + (:issue:`4277`, :issue:`4279`) + +* :ref:`Item loader processors ` can now be + regular functions, they no longer need to be methods (:issue:`3899`) + +* :class:`~scrapy.spiders.Rule` now accepts an ``errback`` parameter + (:issue:`4000`) + +* :class:`~scrapy.http.Request` no longer requires a ``callback`` parameter + when an ``errback`` parameter is specified (:issue:`3586`, :issue:`4008`) + +* :class:`~scrapy.logformatter.LogFormatter` now supports some additional + methods: + + * 
:class:`~scrapy.logformatter.LogFormatter.download_error` for + download errors + + * :class:`~scrapy.logformatter.LogFormatter.item_error` for exceptions + raised during item processing by :ref:`item pipelines + ` + + * :class:`~scrapy.logformatter.LogFormatter.spider_error` for exceptions + raised from :ref:`spider callbacks ` + + (:issue:`374`, :issue:`3986`, :issue:`3989`, :issue:`4176`, :issue:`4188`) + +* The :setting:`FEED_URI` setting now supports :class:`pathlib.Path` values + (:issue:`3731`, :issue:`4074`) + +* A new :signal:`request_left_downloader` signal is sent when a request + leaves the downloader (:issue:`4303`) + +* Scrapy logs a warning when it detects a request callback or errback that + uses ``yield`` but also returns a value, since the returned value would be + lost (:issue:`3484`, :issue:`3869`) + +* :class:`~scrapy.spiders.Spider` objects now raise an :exc:`AttributeError` + exception if they do not have a :class:`~scrapy.spiders.Spider.start_urls` + attribute nor reimplement :class:`~scrapy.spiders.Spider.start_requests`, + but have a ``start_url`` attribute (:issue:`4133`, :issue:`4170`) + +* :class:`~scrapy.exporters.BaseItemExporter` subclasses may now use + ``super().__init__(**kwargs)`` instead of ``self._configure(kwargs)`` in + their ``__init__`` method, passing ``dont_fail=True`` to the parent + ``__init__`` method if needed, and accessing ``kwargs`` at ``self._kwargs`` + after calling their parent ``__init__`` method (:issue:`4193`, + :issue:`4370`) + +* A new ``keep_fragments`` parameter of + ``scrapy.utils.request.request_fingerprint`` allows to generate + different fingerprints for requests with different fragments in their URL + (:issue:`4104`) + +* Download handlers (see :setting:`DOWNLOAD_HANDLERS`) may now use the + ``from_settings`` and ``from_crawler`` class methods that other Scrapy + components already supported (:issue:`4126`) + +* :class:`scrapy.utils.python.MutableChain.__iter__` now returns ``self``, + `allowing it to be used as a sequence `_ + (:issue:`4153`) + + +Bug fixes +~~~~~~~~~ + +* The :command:`crawl` command now also exits with exit code 1 when an + exception happens before the crawling starts (:issue:`4175`, :issue:`4207`) + +* :class:`LinkExtractor.extract_links + ` no longer + re-encodes the query string or URLs from non-UTF-8 responses in UTF-8 + (:issue:`998`, :issue:`1403`, :issue:`1949`, :issue:`4321`) + +* The first spider middleware (see :setting:`SPIDER_MIDDLEWARES`) now also + processes exceptions raised from callbacks that are generators + (:issue:`4260`, :issue:`4272`) + +* Redirects to URLs starting with 3 slashes (``///``) are now supported + (:issue:`4032`, :issue:`4042`) + +* :class:`~scrapy.http.Request` no longer accepts strings as ``url`` simply + because they have a colon (:issue:`2552`, :issue:`4094`) + +* The correct encoding is now used for attach names in + :class:`~scrapy.mail.MailSender` (:issue:`4229`, :issue:`4239`) + +* :class:`~scrapy.dupefilters.RFPDupeFilter`, the default + :setting:`DUPEFILTER_CLASS`, no longer writes an extra ``\r`` character on + each line in Windows, which made the size of the ``requests.seen`` file + unnecessarily large on that platform (:issue:`4283`) + +* Z shell auto-completion now looks for ``.html`` files, not ``.http`` files, + and covers the ``-h`` command-line switch (:issue:`4122`, :issue:`4291`) + +* Adding items to a :class:`scrapy.utils.datatypes.LocalCache` object + without a ``limit`` defined no longer raises a :exc:`TypeError` exception + (:issue:`4123`) + +* 
Fixed a typo in the message of the :exc:`ValueError` exception raised when + :func:`scrapy.utils.misc.create_instance` gets both ``settings`` and + ``crawler`` set to ``None`` (:issue:`4128`) + + +Documentation +~~~~~~~~~~~~~ + +* API documentation now links to an online, syntax-highlighted view of the + corresponding source code (:issue:`4148`) + +* Links to unexisting documentation pages now allow access to the sidebar + (:issue:`4152`, :issue:`4169`) + +* Cross-references within our documentation now display a tooltip when + hovered (:issue:`4173`, :issue:`4183`) + +* Improved the documentation about :meth:`LinkExtractor.extract_links + ` and + simplified :ref:`topics-link-extractors` (:issue:`4045`) + +* Clarified how :class:`ItemLoader.item ` + works (:issue:`3574`, :issue:`4099`) + +* Clarified that :func:`logging.basicConfig` should not be used when also + using :class:`~scrapy.crawler.CrawlerProcess` (:issue:`2149`, + :issue:`2352`, :issue:`3146`, :issue:`3960`) + +* Clarified the requirements for :class:`~scrapy.http.Request` objects + :ref:`when using persistence ` (:issue:`4124`, + :issue:`4139`) + +* Clarified how to install a :ref:`custom image pipeline + ` (:issue:`4034`, :issue:`4252`) + +* Fixed the signatures of the ``file_path`` method in :ref:`media pipeline + ` examples (:issue:`4290`) + +* Covered a backward-incompatible change in Scrapy 1.7.0 affecting custom + :class:`scrapy.core.scheduler.Scheduler` subclasses (:issue:`4274`) + +* Improved the ``README.rst`` and ``CODE_OF_CONDUCT.md`` files + (:issue:`4059`) + +* Documentation examples are now checked as part of our test suite and we + have fixed some of the issues detected (:issue:`4142`, :issue:`4146`, + :issue:`4171`, :issue:`4184`, :issue:`4190`) + +* Fixed logic issues, broken links and typos (:issue:`4247`, :issue:`4258`, + :issue:`4282`, :issue:`4288`, :issue:`4305`, :issue:`4308`, :issue:`4323`, + :issue:`4338`, :issue:`4359`, :issue:`4361`) + +* Improved consistency when referring to the ``__init__`` method of an object + (:issue:`4086`, :issue:`4088`) + +* Fixed an inconsistency between code and output in :ref:`intro-overview` + (:issue:`4213`) + +* Extended :mod:`~sphinx.ext.intersphinx` usage (:issue:`4147`, + :issue:`4172`, :issue:`4185`, :issue:`4194`, :issue:`4197`) + +* We now use a recent version of Python to build the documentation + (:issue:`4140`, :issue:`4249`) + +* Cleaned up documentation (:issue:`4143`, :issue:`4275`) + + +Quality assurance +~~~~~~~~~~~~~~~~~ + +* Re-enabled proxy ``CONNECT`` tests (:issue:`2545`, :issue:`4114`) + +* Added Bandit_ security checks to our test suite (:issue:`4162`, + :issue:`4181`) + +* Added Flake8_ style checks to our test suite and applied many of the + corresponding changes (:issue:`3944`, :issue:`3945`, :issue:`4137`, + :issue:`4157`, :issue:`4167`, :issue:`4174`, :issue:`4186`, :issue:`4195`, + :issue:`4238`, :issue:`4246`, :issue:`4355`, :issue:`4360`, :issue:`4365`) + +* Improved test coverage (:issue:`4097`, :issue:`4218`, :issue:`4236`) + +* Started reporting slowest tests, and improved the performance of some of + them (:issue:`4163`, :issue:`4164`) + +* Fixed broken tests and refactored some tests (:issue:`4014`, :issue:`4095`, + :issue:`4244`, :issue:`4268`, :issue:`4372`) + +* Modified the :doc:`tox ` configuration to allow running tests + with any Python version, run Bandit_ and Flake8_ tests by default, and + enforce a minimum tox version programmatically (:issue:`4179`) + +* Cleaned up code (:issue:`3937`, :issue:`4208`, :issue:`4209`, + 
:issue:`4210`, :issue:`4212`, :issue:`4369`, :issue:`4376`, :issue:`4378`) + +.. _Bandit: https://bandit.readthedocs.io/ +.. _Flake8: https://flake8.pycqa.org/en/latest/ + + +.. _2-0-0-scheduler-queue-changes: + +Changes to scheduler queue classes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The following changes may impact any custom queue classes of all types: + +* The ``push`` method no longer receives a second positional parameter + containing ``request.priority * -1``. If you need that value, get it + from the first positional parameter, ``request``, instead, or use + the new :meth:`~scrapy.core.scheduler.ScrapyPriorityQueue.priority` + method in :class:`scrapy.core.scheduler.ScrapyPriorityQueue` + subclasses. + +The following changes may impact custom priority queue classes: + +* In the ``__init__`` method or the ``from_crawler`` or ``from_settings`` + class methods: + + * The parameter that used to contain a factory function, + ``qfactory``, is now passed as a keyword parameter named + ``downstream_queue_cls``. + + * A new keyword parameter has been added: ``key``. It is a string + that is always an empty string for memory queues and indicates the + :setting:`JOB_DIR` value for disk queues. + + * The parameter for disk queues that contains data from the previous + crawl, ``startprios`` or ``slot_startprios``, is now passed as a + keyword parameter named ``startprios``. + + * The ``serialize`` parameter is no longer passed. The disk queue + class must take care of request serialization on its own before + writing to disk, using the + :func:`~scrapy.utils.reqser.request_to_dict` and + :func:`~scrapy.utils.reqser.request_from_dict` functions from the + :mod:`scrapy.utils.reqser` module. + +The following changes may impact custom disk and memory queue classes: + +* The signature of the ``__init__`` method is now + ``__init__(self, crawler, key)``. + +The following changes affect specifically the +:class:`~scrapy.core.scheduler.ScrapyPriorityQueue` and +:class:`~scrapy.core.scheduler.DownloaderAwarePriorityQueue` classes from +:mod:`scrapy.core.scheduler` and may affect subclasses: + +* In the ``__init__`` method, most of the changes described above apply. + + ``__init__`` may still receive all parameters as positional parameters, + however: + + * ``downstream_queue_cls``, which replaced ``qfactory``, must be + instantiated differently. + + ``qfactory`` was instantiated with a priority value (integer). + + Instances of ``downstream_queue_cls`` should be created using + the new + :meth:`ScrapyPriorityQueue.qfactory ` + or + :meth:`DownloaderAwarePriorityQueue.pqfactory ` + methods. + + * The new ``key`` parameter displaced the ``startprios`` + parameter 1 position to the right. + +* The following class attributes have been added: + + * :attr:`~scrapy.core.scheduler.ScrapyPriorityQueue.crawler` + + * :attr:`~scrapy.core.scheduler.ScrapyPriorityQueue.downstream_queue_cls` + (details above) + + * :attr:`~scrapy.core.scheduler.ScrapyPriorityQueue.key` (details above) + +* The ``serialize`` attribute has been removed (details above) + +The following changes affect specifically the +:class:`~scrapy.core.scheduler.ScrapyPriorityQueue` class and may affect +subclasses: + +* A new :meth:`~scrapy.core.scheduler.ScrapyPriorityQueue.priority` + method has been added which, given a request, returns + ``request.priority * -1``. + + It is used in :meth:`~scrapy.core.scheduler.ScrapyPriorityQueue.push` + to make up for the removal of its ``priority`` parameter. + +* The ``spider`` attribute has been removed. 
Use + :attr:`crawler.spider ` + instead. + +The following changes affect specifically the +:class:`~scrapy.core.scheduler.DownloaderAwarePriorityQueue` class and may +affect subclasses: + +* A new :attr:`~scrapy.core.scheduler.DownloaderAwarePriorityQueue.pqueues` + attribute offers a mapping of downloader slot names to the + corresponding instances of + :attr:`~scrapy.core.scheduler.DownloaderAwarePriorityQueue.downstream_queue_cls`. + +(:issue:`3884`) + + +.. _release-1.8.3: + +Scrapy 1.8.3 (2022-07-25) +------------------------- + +**Security bug fix:** + +- When :class:`~scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware` + processes a request with :reqmeta:`proxy` metadata, and that + :reqmeta:`proxy` metadata includes proxy credentials, + :class:`~scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware` sets + the ``Proxy-Authorization`` header, but only if that header is not already + set. + + There are third-party proxy-rotation downloader middlewares that set + different :reqmeta:`proxy` metadata every time they process a request. + + Because of request retries and redirects, the same request can be processed + by downloader middlewares more than once, including both + :class:`~scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware` and + any third-party proxy-rotation downloader middleware. + + These third-party proxy-rotation downloader middlewares could change the + :reqmeta:`proxy` metadata of a request to a new value, but fail to remove + the ``Proxy-Authorization`` header from the previous value of the + :reqmeta:`proxy` metadata, causing the credentials of one proxy to be sent + to a different proxy. + + To prevent the unintended leaking of proxy credentials, the behavior of + :class:`~scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware` is now + as follows when processing a request: + + - If the request being processed defines :reqmeta:`proxy` metadata that + includes credentials, the ``Proxy-Authorization`` header is always + updated to feature those credentials. + + - If the request being processed defines :reqmeta:`proxy` metadata + without credentials, the ``Proxy-Authorization`` header is removed + *unless* it was originally defined for the same proxy URL. + + To remove proxy credentials while keeping the same proxy URL, remove + the ``Proxy-Authorization`` header. + + - If the request has no :reqmeta:`proxy` metadata, or that metadata is a + falsy value (e.g. ``None``), the ``Proxy-Authorization`` header is + removed. + + It is no longer possible to set a proxy URL through the + :reqmeta:`proxy` metadata but set the credentials through the + ``Proxy-Authorization`` header. Set proxy credentials through the + :reqmeta:`proxy` metadata instead. + + +.. _release-1.8.2: + +Scrapy 1.8.2 (2022-03-01) +------------------------- + +**Security bug fixes:** + +- When a :class:`~scrapy.http.Request` object with cookies defined gets a + redirect response causing a new :class:`~scrapy.http.Request` object to be + scheduled, the cookies defined in the original + :class:`~scrapy.http.Request` object are no longer copied into the new + :class:`~scrapy.http.Request` object. + + If you manually set the ``Cookie`` header on a + :class:`~scrapy.http.Request` object and the domain name of the redirect + URL is not an exact match for the domain of the URL of the original + :class:`~scrapy.http.Request` object, your ``Cookie`` header is now dropped + from the new :class:`~scrapy.http.Request` object. 
+ + The old behavior could be exploited by an attacker to gain access to your + cookies. Please, see the `cjvr-mfj7-j4j8 security advisory`_ for more + information. + + .. _cjvr-mfj7-j4j8 security advisory: https://github.com/scrapy/scrapy/security/advisories/GHSA-cjvr-mfj7-j4j8 + + .. note:: It is still possible to enable the sharing of cookies between + different domains with a shared domain suffix (e.g. + ``example.com`` and any subdomain) by defining the shared domain + suffix (e.g. ``example.com``) as the cookie domain when defining + your cookies. See the documentation of the + :class:`~scrapy.http.Request` class for more information. + +- When the domain of a cookie, either received in the ``Set-Cookie`` header + of a response or defined in a :class:`~scrapy.http.Request` object, is set + to a `public suffix `_, the cookie is now + ignored unless the cookie domain is the same as the request domain. + + The old behavior could be exploited by an attacker to inject cookies into + your requests to some other domains. Please, see the `mfjm-vh54-3f96 + security advisory`_ for more information. + + .. _mfjm-vh54-3f96 security advisory: https://github.com/scrapy/scrapy/security/advisories/GHSA-mfjm-vh54-3f96 + + +.. _release-1.8.1: + +Scrapy 1.8.1 (2021-10-05) +------------------------- + +* **Security bug fix:** + + If you use + :class:`~scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware` + (i.e. the ``http_user`` and ``http_pass`` spider attributes) for HTTP + authentication, any request exposes your credentials to the request target. + + To prevent unintended exposure of authentication credentials to unintended + domains, you must now additionally set a new, additional spider attribute, + ``http_auth_domain``, and point it to the specific domain to which the + authentication credentials must be sent. + + If the ``http_auth_domain`` spider attribute is not set, the domain of the + first request will be considered the HTTP authentication target, and + authentication credentials will only be sent in requests targeting that + domain. + + If you need to send the same HTTP authentication credentials to multiple + domains, you can use :func:`w3lib.http.basic_auth_header` instead to + set the value of the ``Authorization`` header of your requests. + + If you *really* want your spider to send the same HTTP authentication + credentials to any domain, set the ``http_auth_domain`` spider attribute + to ``None``. + + Finally, if you are a user of `scrapy-splash`_, know that this version of + Scrapy breaks compatibility with scrapy-splash 0.7.2 and earlier. You will + need to upgrade scrapy-splash to a greater version for it to continue to + work. + +.. _scrapy-splash: https://github.com/scrapy-plugins/scrapy-splash + + +.. 
_release-1.8.0: + +Scrapy 1.8.0 (2019-10-28) +------------------------- + +Highlights: + +* Dropped Python 3.4 support and updated minimum requirements; made Python 3.8 + support official +* New :meth:`Request.from_curl ` class method +* New :setting:`ROBOTSTXT_PARSER` and :setting:`ROBOTSTXT_USER_AGENT` settings +* New :setting:`DOWNLOADER_CLIENT_TLS_CIPHERS` and + :setting:`DOWNLOADER_CLIENT_TLS_VERBOSE_LOGGING` settings + +Backward-incompatible changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* Python 3.4 is no longer supported, and some of the minimum requirements of + Scrapy have also changed: + + * :doc:`cssselect ` 0.9.1 + * cryptography_ 2.0 + * lxml_ 3.5.0 + * pyOpenSSL_ 16.2.0 + * queuelib_ 1.4.2 + * service_identity_ 16.0.0 + * six_ 1.10.0 + * Twisted_ 17.9.0 (16.0.0 with Python 2) + * zope.interface_ 4.1.3 + + (:issue:`3892`) + +* ``JSONRequest`` is now called :class:`~scrapy.http.JsonRequest` for + consistency with similar classes (:issue:`3929`, :issue:`3982`) + +* If you are using a custom context factory + (:setting:`DOWNLOADER_CLIENTCONTEXTFACTORY`), its ``__init__`` method must + accept two new parameters: ``tls_verbose_logging`` and ``tls_ciphers`` + (:issue:`2111`, :issue:`3392`, :issue:`3442`, :issue:`3450`) + +* :class:`~scrapy.loader.ItemLoader` now turns the values of its input item + into lists: + + .. code-block:: pycon + + >>> item = MyItem() + >>> item["field"] = "value1" + >>> loader = ItemLoader(item=item) + >>> item["field"] + ['value1'] + + This is needed to allow adding values to existing fields + (``loader.add_value('field', 'value2')``). + + (:issue:`3804`, :issue:`3819`, :issue:`3897`, :issue:`3976`, :issue:`3998`, + :issue:`4036`) + +See also :ref:`1.8-deprecation-removals` below. + + +New features +~~~~~~~~~~~~ + +* A new :meth:`Request.from_curl ` class + method allows :ref:`creating a request from a cURL command + ` (:issue:`2985`, :issue:`3862`) + +* A new :setting:`ROBOTSTXT_PARSER` setting allows choosing which robots.txt_ + parser to use. 
It includes built-in support for + :ref:`RobotFileParser `, + :ref:`Protego ` (default), :ref:`Reppy `, and + :ref:`Robotexclusionrulesparser `, and allows you to + :ref:`implement support for additional parsers + ` (:issue:`754`, :issue:`2669`, + :issue:`3796`, :issue:`3935`, :issue:`3969`, :issue:`4006`) + +* A new :setting:`ROBOTSTXT_USER_AGENT` setting allows defining a separate + user agent string to use for robots.txt_ parsing (:issue:`3931`, + :issue:`3966`) + +* :class:`~scrapy.spiders.Rule` no longer requires a :class:`LinkExtractor + ` parameter + (:issue:`781`, :issue:`4016`) + +* Use the new :setting:`DOWNLOADER_CLIENT_TLS_CIPHERS` setting to customize + the TLS/SSL ciphers used by the default HTTP/1.1 downloader (:issue:`3392`, + :issue:`3442`) + +* Set the new :setting:`DOWNLOADER_CLIENT_TLS_VERBOSE_LOGGING` setting to + ``True`` to enable debug-level messages about TLS connection parameters + after establishing HTTPS connections (:issue:`2111`, :issue:`3450`) + +* Callbacks that receive keyword arguments + (see :attr:`Request.cb_kwargs `) can now be + tested using the new :class:`@cb_kwargs + ` + :ref:`spider contract ` (:issue:`3985`, :issue:`3988`) + +* When a :class:`@scrapes ` spider + contract fails, all missing fields are now reported (:issue:`766`, + :issue:`3939`) + +* :ref:`Custom log formats ` can now drop messages by + having the corresponding methods of the configured :setting:`LOG_FORMATTER` + return ``None`` (:issue:`3984`, :issue:`3987`) + +* A much improved completion definition is now available for Zsh_ + (:issue:`4069`) + + +Bug fixes +~~~~~~~~~ + +* :meth:`ItemLoader.load_item() ` no + longer makes later calls to :meth:`ItemLoader.get_output_value() + ` or + :meth:`ItemLoader.load_item() ` return + empty data (:issue:`3804`, :issue:`3819`, :issue:`3897`, :issue:`3976`, + :issue:`3998`, :issue:`4036`) + +* Fixed :class:`~scrapy.statscollectors.DummyStatsCollector` raising a + :exc:`TypeError` exception (:issue:`4007`, :issue:`4052`) + +* :meth:`FilesPipeline.file_path + ` and + :meth:`ImagesPipeline.file_path + ` no longer choose + file extensions that are not `registered with IANA`_ (:issue:`1287`, + :issue:`3953`, :issue:`3954`) + +* When using botocore_ to persist files in S3, all botocore-supported headers + are properly mapped now (:issue:`3904`, :issue:`3905`) + +* FTP passwords in :setting:`FEED_URI` containing percent-escaped characters + are now properly decoded (:issue:`3941`) + +* A memory-handling and error-handling issue in + :func:`scrapy.utils.ssl.get_temp_key_info` has been fixed (:issue:`3920`) + + +Documentation +~~~~~~~~~~~~~ + +* The documentation now covers how to define and configure a :ref:`custom log + format ` (:issue:`3616`, :issue:`3660`) + +* API documentation added for :class:`~scrapy.exporters.MarshalItemExporter` + and :class:`~scrapy.exporters.PythonItemExporter` (:issue:`3973`) + +* API documentation added for :class:`~scrapy.item.BaseItem` and + :class:`~scrapy.item.ItemMeta` (:issue:`3999`) + +* Minor documentation fixes (:issue:`2998`, :issue:`3398`, :issue:`3597`, + :issue:`3894`, :issue:`3934`, :issue:`3978`, :issue:`3993`, :issue:`4022`, + :issue:`4028`, :issue:`4033`, :issue:`4046`, :issue:`4050`, :issue:`4055`, + :issue:`4056`, :issue:`4061`, :issue:`4072`, :issue:`4071`, :issue:`4079`, + :issue:`4081`, :issue:`4089`, :issue:`4093`) + + +.. _1.8-deprecation-removals: + +Deprecation removals +~~~~~~~~~~~~~~~~~~~~ + +* ``scrapy.xlib`` has been removed (:issue:`4015`) + + +.. 
_1.8-deprecations: + +Deprecations +~~~~~~~~~~~~ + +* The LevelDB_ storage backend + (``scrapy.extensions.httpcache.LeveldbCacheStorage``) of + :class:`~scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware` is + deprecated (:issue:`4085`, :issue:`4092`) + +* Use of the undocumented ``SCRAPY_PICKLED_SETTINGS_TO_OVERRIDE`` environment + variable is deprecated (:issue:`3910`) + +* ``scrapy.item.DictItem`` is deprecated, use :class:`~scrapy.item.Item` + instead (:issue:`3999`) + + +Other changes +~~~~~~~~~~~~~ + +* Minimum versions of optional Scrapy requirements that are covered by + continuous integration tests have been updated: + + * botocore_ 1.3.23 + * Pillow_ 3.4.2 + + Lower versions of these optional requirements may work, but it is not + guaranteed (:issue:`3892`) + +* GitHub templates for bug reports and feature requests (:issue:`3126`, + :issue:`3471`, :issue:`3749`, :issue:`3754`) + +* Continuous integration fixes (:issue:`3923`) + +* Code cleanup (:issue:`3391`, :issue:`3907`, :issue:`3946`, :issue:`3950`, + :issue:`4023`, :issue:`4031`) + + +.. _release-1.7.4: + +Scrapy 1.7.4 (2019-10-21) +------------------------- + +Revert the fix for :issue:`3804` (:issue:`3819`), which has a few undesired +side effects (:issue:`3897`, :issue:`3976`). + +As a result, when an item loader is initialized with an item, +:meth:`ItemLoader.load_item() ` once again +makes later calls to :meth:`ItemLoader.get_output_value() +` or :meth:`ItemLoader.load_item() +` return empty data. + + +.. _release-1.7.3: + +Scrapy 1.7.3 (2019-08-01) +------------------------- + +Enforce lxml 4.3.5 or lower for Python 3.4 (:issue:`3912`, :issue:`3918`). + + +.. _release-1.7.2: + +Scrapy 1.7.2 (2019-07-23) +------------------------- + +Fix Python 2 support (:issue:`3889`, :issue:`3893`, :issue:`3896`). + + +.. _release-1.7.1: + +Scrapy 1.7.1 (2019-07-18) +------------------------- + +Re-packaging of Scrapy 1.7.0, which was missing some changes in PyPI. + + +.. _release-1.7.0: + +Scrapy 1.7.0 (2019-07-18) +------------------------- + +.. note:: Make sure you install Scrapy 1.7.1. The Scrapy 1.7.0 package in PyPI + is the result of an erroneous commit tagging and does not include all + the changes described below. + +Highlights: + +* Improvements for crawls targeting multiple domains +* A cleaner way to pass arguments to callbacks +* A new class for JSON requests +* Improvements for rule-based spiders +* New features for feed exports + +Backward-incompatible changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* ``429`` is now part of the :setting:`RETRY_HTTP_CODES` setting by default + + This change is **backward incompatible**. If you don’t want to retry + ``429``, you must override :setting:`RETRY_HTTP_CODES` accordingly. + +* :class:`~scrapy.crawler.Crawler`, + :class:`CrawlerRunner.crawl ` and + :class:`CrawlerRunner.create_crawler ` + no longer accept a :class:`~scrapy.spiders.Spider` subclass instance, they + only accept a :class:`~scrapy.spiders.Spider` subclass now. + + :class:`~scrapy.spiders.Spider` subclass instances were never meant to + work, and they were not working as one would expect: instead of using the + passed :class:`~scrapy.spiders.Spider` subclass instance, their + :class:`~scrapy.spiders.Spider.from_crawler` method was called to generate + a new instance. + +* Non-default values for the :setting:`SCHEDULER_PRIORITY_QUEUE` setting + may stop working. Scheduler priority queue classes now need to handle + :class:`~scrapy.http.Request` objects instead of arbitrary Python data + structures. 
+ +* An additional ``crawler`` parameter has been added to the ``__init__`` + method of the :class:`~scrapy.core.scheduler.Scheduler` class. Custom + scheduler subclasses which don't accept arbitrary parameters in their + ``__init__`` method might break because of this change. + + For more information, see :setting:`SCHEDULER`. + +See also :ref:`1.7-deprecation-removals` below. + + +New features +~~~~~~~~~~~~ + +* A new scheduler priority queue, + ``scrapy.pqueues.DownloaderAwarePriorityQueue``, may be + :ref:`enabled ` for a significant + scheduling improvement on crawls targeting multiple web domains, at the + cost of no :setting:`CONCURRENT_REQUESTS_PER_IP` support (:issue:`3520`) + +* A new :attr:`Request.cb_kwargs ` attribute + provides a cleaner way to pass keyword arguments to callback methods + (:issue:`1138`, :issue:`3563`) + +* A new :class:`JSONRequest ` class offers a more + convenient way to build JSON requests (:issue:`3504`, :issue:`3505`) + +* A ``process_request`` callback passed to the :class:`~scrapy.spiders.Rule` + ``__init__`` method now receives the :class:`~scrapy.http.Response` object that + originated the request as its second argument (:issue:`3682`) + +* A new ``restrict_text`` parameter for the + :attr:`LinkExtractor ` + ``__init__`` method allows filtering links by linking text (:issue:`3622`, + :issue:`3635`) + +* A new :setting:`FEED_STORAGE_S3_ACL` setting allows defining a custom ACL + for feeds exported to Amazon S3 (:issue:`3607`) + +* A new :setting:`FEED_STORAGE_FTP_ACTIVE` setting allows using FTP’s active + connection mode for feeds exported to FTP servers (:issue:`3829`) + +* A new :setting:`METAREFRESH_IGNORE_TAGS` setting allows overriding which + HTML tags are ignored when searching a response for HTML meta tags that + trigger a redirect (:issue:`1422`, :issue:`3768`) + +* A new :reqmeta:`redirect_reasons` request meta key exposes the reason + (status code, meta refresh) behind every followed redirect (:issue:`3581`, + :issue:`3687`) + +* The ``SCRAPY_CHECK`` variable is now set to the ``true`` string during runs + of the :command:`check` command, which allows :ref:`detecting contract + check runs from code ` (:issue:`3704`, + :issue:`3739`) + +* A new :meth:`Item.deepcopy() ` method makes it + easier to :ref:`deep-copy items ` (:issue:`1493`, + :issue:`3671`) + +* :class:`~scrapy.extensions.corestats.CoreStats` also logs + ``elapsed_time_seconds`` now (:issue:`3638`) + +* Exceptions from :class:`~scrapy.loader.ItemLoader` :ref:`input and output + processors ` are now more verbose + (:issue:`3836`, :issue:`3840`) + +* :class:`~scrapy.crawler.Crawler`, + :class:`CrawlerRunner.crawl ` and + :class:`CrawlerRunner.create_crawler ` + now fail gracefully if they receive a :class:`~scrapy.spiders.Spider` + subclass instance instead of the subclass itself (:issue:`2283`, + :issue:`3610`, :issue:`3872`) + + +Bug fixes +~~~~~~~~~ + +* :meth:`~scrapy.spidermiddlewares.SpiderMiddleware.process_spider_exception` + is now also invoked for generators (:issue:`220`, :issue:`2061`) + +* System exceptions like KeyboardInterrupt_ are no longer caught + (:issue:`3726`) + +* :meth:`ItemLoader.load_item() ` no + longer makes later calls to :meth:`ItemLoader.get_output_value() + ` or + :meth:`ItemLoader.load_item() ` return + empty data (:issue:`3804`, :issue:`3819`) + +* The images pipeline (:class:`~scrapy.pipelines.images.ImagesPipeline`) no + longer ignores these Amazon S3 settings: :setting:`AWS_ENDPOINT_URL`, + :setting:`AWS_REGION_NAME`, :setting:`AWS_USE_SSL`, 
:setting:`AWS_VERIFY` + (:issue:`3625`) + +* Fixed a memory leak in ``scrapy.pipelines.media.MediaPipeline`` affecting, + for example, non-200 responses and exceptions from custom middlewares + (:issue:`3813`) + +* Requests with private callbacks are now correctly unserialized from disk + (:issue:`3790`) + +* :meth:`FormRequest.from_response() ` + now handles invalid methods like major web browsers (:issue:`3777`, + :issue:`3794`) + + +Documentation +~~~~~~~~~~~~~ + +* A new topic, :ref:`topics-dynamic-content`, covers recommended approaches + to read dynamically-loaded data (:issue:`3703`) + +* :ref:`topics-broad-crawls` now features information about memory usage + (:issue:`1264`, :issue:`3866`) + +* The documentation of :class:`~scrapy.spiders.Rule` now covers how to access + the text of a link when using :class:`~scrapy.spiders.CrawlSpider` + (:issue:`3711`, :issue:`3712`) + +* A new section, :ref:`httpcache-storage-custom`, covers writing a custom + cache storage backend for + :class:`~scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware` + (:issue:`3683`, :issue:`3692`) + +* A new :ref:`FAQ ` entry, :ref:`faq-split-item`, explains what to do + when you want to split an item into multiple items from an item pipeline + (:issue:`2240`, :issue:`3672`) + +* Updated the :ref:`FAQ entry about crawl order ` to explain why + the first few requests rarely follow the desired order (:issue:`1739`, + :issue:`3621`) + +* The :setting:`LOGSTATS_INTERVAL` setting (:issue:`3730`), the + :meth:`FilesPipeline.file_path ` + and + :meth:`ImagesPipeline.file_path ` + methods (:issue:`2253`, :issue:`3609`) and the + :meth:`Crawler.stop() ` method (:issue:`3842`) + are now documented + +* Some parts of the documentation that were confusing or misleading are now + clearer (:issue:`1347`, :issue:`1789`, :issue:`2289`, :issue:`3069`, + :issue:`3615`, :issue:`3626`, :issue:`3668`, :issue:`3670`, :issue:`3673`, + :issue:`3728`, :issue:`3762`, :issue:`3861`, :issue:`3882`) + +* Minor documentation fixes (:issue:`3648`, :issue:`3649`, :issue:`3662`, + :issue:`3674`, :issue:`3676`, :issue:`3694`, :issue:`3724`, :issue:`3764`, + :issue:`3767`, :issue:`3791`, :issue:`3797`, :issue:`3806`, :issue:`3812`) + +.. 
_1.7-deprecation-removals: + +Deprecation removals +~~~~~~~~~~~~~~~~~~~~ + +The following deprecated APIs have been removed (:issue:`3578`): + +* ``scrapy.conf`` (use :attr:`Crawler.settings + `) + +* From ``scrapy.core.downloader.handlers``: + + * ``http.HttpDownloadHandler`` (use ``http10.HTTP10DownloadHandler``) + +* ``scrapy.loader.ItemLoader._get_values`` (use ``_get_xpathvalues``) + +* ``scrapy.loader.XPathItemLoader`` (use :class:`~scrapy.loader.ItemLoader`) + +* ``scrapy.log`` (see :ref:`topics-logging`) + +* From ``scrapy.pipelines``: + + * ``files.FilesPipeline.file_key`` (use ``file_path``) + + * ``images.ImagesPipeline.file_key`` (use ``file_path``) + + * ``images.ImagesPipeline.image_key`` (use ``file_path``) + + * ``images.ImagesPipeline.thumb_key`` (use ``thumb_path``) + +* From both ``scrapy.selector`` and ``scrapy.selector.lxmlsel``: + + * ``HtmlXPathSelector`` (use :class:`~scrapy.selector.Selector`) + + * ``XmlXPathSelector`` (use :class:`~scrapy.selector.Selector`) + + * ``XPathSelector`` (use :class:`~scrapy.selector.Selector`) + + * ``XPathSelectorList`` (use :class:`~scrapy.selector.Selector`) + +* From ``scrapy.selector.csstranslator``: + + * ``ScrapyGenericTranslator`` (use parsel.csstranslator.GenericTranslator_) + + * ``ScrapyHTMLTranslator`` (use parsel.csstranslator.HTMLTranslator_) + + * ``ScrapyXPathExpr`` (use parsel.csstranslator.XPathExpr_) + +* From :class:`~scrapy.selector.Selector`: + + * ``_root`` (both the ``__init__`` method argument and the object property, use + ``root``) + + * ``extract_unquoted`` (use ``getall``) + + * ``select`` (use ``xpath``) + +* From :class:`~scrapy.selector.SelectorList`: + + * ``extract_unquoted`` (use ``getall``) + + * ``select`` (use ``xpath``) + + * ``x`` (use ``xpath``) + +* ``scrapy.spiders.BaseSpider`` (use :class:`~scrapy.spiders.Spider`) + +* From :class:`~scrapy.spiders.Spider` (and subclasses): + + * ``DOWNLOAD_DELAY`` (use :ref:`download_delay + `) + + * ``set_crawler`` (use :meth:`~scrapy.spiders.Spider.from_crawler`) + +* ``scrapy.spiders.spiders`` (use :class:`~scrapy.spiderloader.SpiderLoader`) + +* ``scrapy.telnet`` (use :mod:`scrapy.extensions.telnet`) + +* From ``scrapy.utils.python``: + + * ``str_to_unicode`` (use ``to_unicode``) + + * ``unicode_to_str`` (use ``to_bytes``) + +* ``scrapy.utils.response.body_or_str`` + +The following deprecated settings have also been removed (:issue:`3578`): + +* ``SPIDER_MANAGER_CLASS`` (use :setting:`SPIDER_LOADER_CLASS`) + + +.. _1.7-deprecations: + +Deprecations +~~~~~~~~~~~~ + +* The ``queuelib.PriorityQueue`` value for the + :setting:`SCHEDULER_PRIORITY_QUEUE` setting is deprecated. Use + ``scrapy.pqueues.ScrapyPriorityQueue`` instead. + +* ``process_request`` callbacks passed to :class:`~scrapy.spiders.Rule` that + do not accept two arguments are deprecated. + +* The following modules are deprecated: + + * ``scrapy.utils.http`` (use `w3lib.http`_) + + * ``scrapy.utils.markup`` (use `w3lib.html`_) + + * ``scrapy.utils.multipart`` (use `urllib3`_) + +* The ``scrapy.utils.datatypes.MergeDict`` class is deprecated for Python 3 + code bases. Use :class:`~collections.ChainMap` instead. (:issue:`3878`) + +* The ``scrapy.utils.gz.is_gzipped`` function is deprecated. Use + ``scrapy.utils.gz.gzip_magic_number`` instead. + +.. _urllib3: https://urllib3.readthedocs.io/en/latest/index.html +.. _w3lib.html: https://w3lib.readthedocs.io/en/latest/w3lib.html#module-w3lib.html +.. 
_w3lib.http: https://w3lib.readthedocs.io/en/latest/w3lib.html#module-w3lib.http + + +Other changes +~~~~~~~~~~~~~ + +* It is now possible to run all tests from the same tox_ environment in + parallel; the documentation now covers :ref:`this and other ways to run + tests ` (:issue:`3707`) + +* It is now possible to generate an API documentation coverage report + (:issue:`3806`, :issue:`3810`, :issue:`3860`) + +* The :ref:`documentation policies ` now require + docstrings_ (:issue:`3701`) that follow `PEP 257`_ (:issue:`3748`) + +* Internal fixes and cleanup (:issue:`3629`, :issue:`3643`, :issue:`3684`, + :issue:`3698`, :issue:`3734`, :issue:`3735`, :issue:`3736`, :issue:`3737`, + :issue:`3809`, :issue:`3821`, :issue:`3825`, :issue:`3827`, :issue:`3833`, + :issue:`3857`, :issue:`3877`) + +.. _release-1.6.0: + +Scrapy 1.6.0 (2019-01-30) +------------------------- + +Highlights: + +* better Windows support; +* Python 3.7 compatibility; +* big documentation improvements, including a switch + from ``.extract_first()`` + ``.extract()`` API to ``.get()`` + ``.getall()`` + API; +* feed exports, FilePipeline and MediaPipeline improvements; +* better extensibility: :signal:`item_error` and + :signal:`request_reached_downloader` signals; ``from_crawler`` support + for feed exporters, feed storages and dupefilters. +* ``scrapy.contracts`` fixes and new features; +* telnet console security improvements, first released as a + backport in :ref:`release-1.5.2`; +* clean-up of the deprecated code; +* various bug fixes, small new features and usability improvements across + the codebase. + +Selector API changes +~~~~~~~~~~~~~~~~~~~~ + +While these are not changes in Scrapy itself, but rather in the parsel_ +library which Scrapy uses for xpath/css selectors, these changes are +worth mentioning here. Scrapy now depends on parsel >= 1.5, and +Scrapy documentation is updated to follow recent ``parsel`` API conventions. + +Most visible change is that ``.get()`` and ``.getall()`` selector +methods are now preferred over ``.extract_first()`` and ``.extract()``. +We feel that these new methods result in a more concise and readable code. +See :ref:`old-extraction-api` for more details. + +.. note:: + There are currently **no plans** to deprecate ``.extract()`` + and ``.extract_first()`` methods. + +Another useful new feature is the introduction of ``Selector.attrib`` and +``SelectorList.attrib`` properties, which make it easier to get +attributes of HTML elements. See :ref:`selecting-attributes`. + +CSS selectors are cached in parsel >= 1.5, which makes them faster +when the same CSS path is used many times. This is very common in +case of Scrapy spiders: callbacks are usually called several times, +on different pages. + +If you're using custom ``Selector`` or ``SelectorList`` subclasses, +a **backward incompatible** change in parsel may affect your code. +See `parsel changelog`_ for a detailed description, as well as for the +full list of improvements. + +.. _parsel changelog: https://parsel.readthedocs.io/en/latest/history.html + +Telnet console +~~~~~~~~~~~~~~ + +**Backward incompatible**: Scrapy's telnet console now requires username +and password. See :ref:`topics-telnetconsole` for more details. This change +fixes a **security issue**; see :ref:`release-1.5.2` release notes for details. + +New extensibility features +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* ``from_crawler`` support is added to feed exporters and feed storages. 
This, + among other things, allows to access Scrapy settings from custom feed + storages and exporters (:issue:`1605`, :issue:`3348`). +* ``from_crawler`` support is added to dupefilters (:issue:`2956`); this allows + to access e.g. settings or a spider from a dupefilter. +* :signal:`item_error` is fired when an error happens in a pipeline + (:issue:`3256`); +* :signal:`request_reached_downloader` is fired when Downloader gets + a new Request; this signal can be useful e.g. for custom Schedulers + (:issue:`3393`). +* new SitemapSpider :meth:`~.SitemapSpider.sitemap_filter` method which allows + to select sitemap entries based on their attributes in SitemapSpider + subclasses (:issue:`3512`). +* Lazy loading of Downloader Handlers is now optional; this enables better + initialization error handling in custom Downloader Handlers (:issue:`3394`). + +New FilePipeline and MediaPipeline features +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* Expose more options for S3FilesStore: :setting:`AWS_ENDPOINT_URL`, + :setting:`AWS_USE_SSL`, :setting:`AWS_VERIFY`, :setting:`AWS_REGION_NAME`. + For example, this allows to use alternative or self-hosted + AWS-compatible providers (:issue:`2609`, :issue:`3548`). +* ACL support for Google Cloud Storage: :setting:`FILES_STORE_GCS_ACL` and + :setting:`IMAGES_STORE_GCS_ACL` (:issue:`3199`). + +``scrapy.contracts`` improvements +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* Exceptions in contracts code are handled better (:issue:`3377`); +* ``dont_filter=True`` is used for contract requests, which allows to test + different callbacks with the same URL (:issue:`3381`); +* ``request_cls`` attribute in Contract subclasses allow to use different + Request classes in contracts, for example FormRequest (:issue:`3383`). +* Fixed errback handling in contracts, e.g. for cases where a contract + is executed for URL which returns non-200 response (:issue:`3371`). + +Usability improvements +~~~~~~~~~~~~~~~~~~~~~~ + +* more stats for RobotsTxtMiddleware (:issue:`3100`) +* INFO log level is used to show telnet host/port (:issue:`3115`) +* a message is added to IgnoreRequest in RobotsTxtMiddleware (:issue:`3113`) +* better validation of ``url`` argument in ``Response.follow`` (:issue:`3131`) +* non-zero exit code is returned from Scrapy commands when error happens + on spider initialization (:issue:`3226`) +* Link extraction improvements: "ftp" is added to scheme list (:issue:`3152`); + "flv" is added to common video extensions (:issue:`3165`) +* better error message when an exporter is disabled (:issue:`3358`); +* ``scrapy shell --help`` mentions syntax required for local files + (``./file.html``) - :issue:`3496`. +* Referer header value is added to RFPDupeFilter log messages (:issue:`3588`) + +Bug fixes +~~~~~~~~~ + +* fixed issue with extra blank lines in .csv exports under Windows + (:issue:`3039`); +* proper handling of pickling errors in Python 3 when serializing objects + for disk queues (:issue:`3082`) +* flags are now preserved when copying Requests (:issue:`3342`); +* FormRequest.from_response clickdata shouldn't ignore elements with + ``input[type=image]`` (:issue:`3153`). +* FormRequest.from_response should preserve duplicate keys (:issue:`3247`) + +Documentation improvements +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* Docs are re-written to suggest .get/.getall API instead of + .extract/.extract_first. 
Also, :ref:`topics-selectors` docs are updated + and re-structured to match latest parsel docs; they now contain more topics, + such as :ref:`selecting-attributes` or :ref:`topics-selectors-css-extensions` + (:issue:`3390`). +* :ref:`topics-developer-tools` is a new tutorial which replaces + old Firefox and Firebug tutorials (:issue:`3400`). +* SCRAPY_PROJECT environment variable is documented (:issue:`3518`); +* troubleshooting section is added to install instructions (:issue:`3517`); +* improved links to beginner resources in the tutorial + (:issue:`3367`, :issue:`3468`); +* fixed :setting:`RETRY_HTTP_CODES` default values in docs (:issue:`3335`); +* remove unused ``DEPTH_STATS`` option from docs (:issue:`3245`); +* other cleanups (:issue:`3347`, :issue:`3350`, :issue:`3445`, :issue:`3544`, + :issue:`3605`). + +Deprecation removals +~~~~~~~~~~~~~~~~~~~~ + +Compatibility shims for pre-1.0 Scrapy module names are removed +(:issue:`3318`): + +* ``scrapy.command`` +* ``scrapy.contrib`` (with all submodules) +* ``scrapy.contrib_exp`` (with all submodules) +* ``scrapy.dupefilter`` +* ``scrapy.linkextractor`` +* ``scrapy.project`` +* ``scrapy.spider`` +* ``scrapy.spidermanager`` +* ``scrapy.squeue`` +* ``scrapy.stats`` +* ``scrapy.statscol`` +* ``scrapy.utils.decorator`` + +See :ref:`module-relocations` for more information, or use suggestions +from Scrapy 1.5.x deprecation warnings to update your code. + +Other deprecation removals: + +* Deprecated scrapy.interfaces.ISpiderManager is removed; please use + scrapy.interfaces.ISpiderLoader. +* Deprecated ``CrawlerSettings`` class is removed (:issue:`3327`). +* Deprecated ``Settings.overrides`` and ``Settings.defaults`` attributes + are removed (:issue:`3327`, :issue:`3359`). + +Other improvements, cleanups +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* All Scrapy tests now pass on Windows; Scrapy testing suite is executed + in a Windows environment on CI (:issue:`3315`). +* Python 3.7 support (:issue:`3326`, :issue:`3150`, :issue:`3547`). +* Testing and CI fixes (:issue:`3526`, :issue:`3538`, :issue:`3308`, + :issue:`3311`, :issue:`3309`, :issue:`3305`, :issue:`3210`, :issue:`3299`) +* ``scrapy.http.cookies.CookieJar.clear`` accepts "domain", "path" and "name" + optional arguments (:issue:`3231`). +* additional files are included to sdist (:issue:`3495`); +* code style fixes (:issue:`3405`, :issue:`3304`); +* unneeded .strip() call is removed (:issue:`3519`); +* collections.deque is used to store MiddlewareManager methods instead + of a list (:issue:`3476`) + +.. _release-1.5.2: + +Scrapy 1.5.2 (2019-01-22) +------------------------- + +* *Security bugfix*: Telnet console extension can be easily exploited by rogue + websites POSTing content to http://localhost:6023, we haven't found a way to + exploit it from Scrapy, but it is very easy to trick a browser to do so and + elevates the risk for local development environment. + + *The fix is backward incompatible*, it enables telnet user-password + authentication by default with a random generated password. If you can't + upgrade right away, please consider setting :setting:`TELNETCONSOLE_PORT` + out of its default value. + + See :ref:`telnet console ` documentation for more info + +* Backport CI build failure under GCE environment due to boto import error. + +.. 
_release-1.5.1: + +Scrapy 1.5.1 (2018-07-12) +------------------------- + +This is a maintenance release with important bug fixes, but no new features: + +* ``O(N^2)`` gzip decompression issue which affected Python 3 and PyPy + is fixed (:issue:`3281`); +* skipping of TLS validation errors is improved (:issue:`3166`); +* Ctrl-C handling is fixed in Python 3.5+ (:issue:`3096`); +* testing fixes (:issue:`3092`, :issue:`3263`); +* documentation improvements (:issue:`3058`, :issue:`3059`, :issue:`3089`, + :issue:`3123`, :issue:`3127`, :issue:`3189`, :issue:`3224`, :issue:`3280`, + :issue:`3279`, :issue:`3201`, :issue:`3260`, :issue:`3284`, :issue:`3298`, + :issue:`3294`). + + +.. _release-1.5.0: + +Scrapy 1.5.0 (2017-12-29) +------------------------- + +This release brings small new features and improvements across the codebase. +Some highlights: + +* Google Cloud Storage is supported in FilesPipeline and ImagesPipeline. +* Crawling with proxy servers becomes more efficient, as connections + to proxies can be reused now. +* Warnings, exception and logging messages are improved to make debugging + easier. +* ``scrapy parse`` command now allows to set custom request meta via + ``--meta`` argument. +* Compatibility with Python 3.6, PyPy and PyPy3 is improved; + PyPy and PyPy3 are now supported officially, by running tests on CI. +* Better default handling of HTTP 308, 522 and 524 status codes. +* Documentation is improved, as usual. + +Backward Incompatible Changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +* Scrapy 1.5 drops support for Python 3.3. +* Default Scrapy User-Agent now uses https link to scrapy.org (:issue:`2983`). + **This is technically backward-incompatible**; override + :setting:`USER_AGENT` if you relied on old value. +* Logging of settings overridden by ``custom_settings`` is fixed; + **this is technically backward-incompatible** because the logger + changes from ``[scrapy.utils.log]`` to ``[scrapy.crawler]``. If you're + parsing Scrapy logs, please update your log parsers (:issue:`1343`). +* LinkExtractor now ignores ``m4v`` extension by default, this is change + in behavior. +* 522 and 524 status codes are added to ``RETRY_HTTP_CODES`` (:issue:`2851`) + +New features +~~~~~~~~~~~~ + +- Support ```` tags in ``Response.follow`` (:issue:`2785`) +- Support for ``ptpython`` REPL (:issue:`2654`) +- Google Cloud Storage support for FilesPipeline and ImagesPipeline + (:issue:`2923`). +- New ``--meta`` option of the "scrapy parse" command allows to pass additional + request.meta (:issue:`2883`) +- Populate spider variable when using ``shell.inspect_response`` (:issue:`2812`) +- Handle HTTP 308 Permanent Redirect (:issue:`2844`) +- Add 522 and 524 to ``RETRY_HTTP_CODES`` (:issue:`2851`) +- Log versions information at startup (:issue:`2857`) +- ``scrapy.mail.MailSender`` now works in Python 3 (it requires Twisted 17.9.0) +- Connections to proxy servers are reused (:issue:`2743`) +- Add template for a downloader middleware (:issue:`2755`) +- Explicit message for NotImplementedError when parse callback not defined + (:issue:`2831`) +- CrawlerProcess got an option to disable installation of root log handler + (:issue:`2921`) +- LinkExtractor now ignores ``m4v`` extension by default +- Better log messages for responses over :setting:`DOWNLOAD_WARNSIZE` and + :setting:`DOWNLOAD_MAXSIZE` limits (:issue:`2927`) +- Show warning when a URL is put to ``Spider.allowed_domains`` instead of + a domain (:issue:`2250`). 
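+
+  For example (a minimal, hypothetical spider sketch; the name, domain and
+  URL below are placeholders), ``allowed_domains`` entries should be bare
+  domain names rather than URLs::
+
+      import scrapy
+
+      class ExampleSpider(scrapy.Spider):
+          name = "example"  # hypothetical spider name
+          # Bare domains only; Scrapy logs a warning if a full URL such as
+          # "https://example.com/shop" is listed here instead.
+          allowed_domains = ["example.com"]
+          start_urls = ["https://example.com/"]
+
+          def parse(self, response):
+              self.logger.info("Crawled %s", response.url)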
+ +Bug fixes +~~~~~~~~~ + +- Fix logging of settings overridden by ``custom_settings``; + **this is technically backward-incompatible** because the logger + changes from ``[scrapy.utils.log]`` to ``[scrapy.crawler]``, so please + update your log parsers if needed (:issue:`1343`) +- Default Scrapy User-Agent now uses https link to scrapy.org (:issue:`2983`). + **This is technically backward-incompatible**; override + :setting:`USER_AGENT` if you relied on old value. +- Fix PyPy and PyPy3 test failures, support them officially + (:issue:`2793`, :issue:`2935`, :issue:`2990`, :issue:`3050`, :issue:`2213`, + :issue:`3048`) +- Fix DNS resolver when ``DNSCACHE_ENABLED=False`` (:issue:`2811`) +- Add ``cryptography`` for Debian Jessie tox test env (:issue:`2848`) +- Add verification to check if Request callback is callable (:issue:`2766`) +- Port ``extras/qpsclient.py`` to Python 3 (:issue:`2849`) +- Use getfullargspec under the scenes for Python 3 to stop DeprecationWarning + (:issue:`2862`) +- Update deprecated test aliases (:issue:`2876`) +- Fix ``SitemapSpider`` support for alternate links (:issue:`2853`) + +Docs +~~~~ + +- Added missing bullet point for the ``AUTOTHROTTLE_TARGET_CONCURRENCY`` + setting. (:issue:`2756`) +- Update Contributing docs, document new support channels + (:issue:`2762`, issue:`3038`) +- Include references to Scrapy subreddit in the docs +- Fix broken links; use https:// for external links + (:issue:`2978`, :issue:`2982`, :issue:`2958`) +- Document CloseSpider extension better (:issue:`2759`) +- Use ``pymongo.collection.Collection.insert_one()`` in MongoDB example + (:issue:`2781`) +- Spelling mistake and typos + (:issue:`2828`, :issue:`2837`, :issue:`2884`, :issue:`2924`) +- Clarify ``CSVFeedSpider.headers`` documentation (:issue:`2826`) +- Document ``DontCloseSpider`` exception and clarify ``spider_idle`` + (:issue:`2791`) +- Update "Releases" section in README (:issue:`2764`) +- Fix rst syntax in ``DOWNLOAD_FAIL_ON_DATALOSS`` docs (:issue:`2763`) +- Small fix in description of startproject arguments (:issue:`2866`) +- Clarify data types in Response.body docs (:issue:`2922`) +- Add a note about ``request.meta['depth']`` to DepthMiddleware docs (:issue:`2374`) +- Add a note about ``request.meta['dont_merge_cookies']`` to CookiesMiddleware + docs (:issue:`2999`) +- Up-to-date example of project structure (:issue:`2964`, :issue:`2976`) +- A better example of ItemExporters usage (:issue:`2989`) +- Document ``from_crawler`` methods for spider and downloader middlewares + (:issue:`3019`) + +.. _release-1.4.0: + +Scrapy 1.4.0 (2017-05-18) +------------------------- + +Scrapy 1.4 does not bring that many breathtaking new features +but quite a few handy improvements nonetheless. + +Scrapy now supports anonymous FTP sessions with customizable user and +password via the new :setting:`FTP_USER` and :setting:`FTP_PASSWORD` settings. +And if you're using Twisted version 17.1.0 or above, FTP is now available +with Python 3. + +There's a new :meth:`response.follow ` method +for creating requests; **it is now a recommended way to create Requests +in Scrapy spiders**. This method makes it easier to write correct +spiders; ``response.follow`` has several advantages over creating +``scrapy.Request`` objects directly: + +* it handles relative URLs; +* it works properly with non-ascii URLs on non-UTF8 pages; +* in addition to absolute and relative URLs it supports Selectors; + for ```` elements it can also extract their href values. 
+ +For example, instead of this:: + + for href in response.css('li.page a::attr(href)').extract(): + url = response.urljoin(href) + yield scrapy.Request(url, self.parse, encoding=response.encoding) + +One can now write this:: + + for a in response.css('li.page a'): + yield response.follow(a, self.parse) + +Link extractors are also improved. They work similarly to what a regular +modern browser would do: leading and trailing whitespace are removed +from attributes (think ``href=" http://example.com"``) when building +``Link`` objects. This whitespace-stripping also happens for ``action`` +attributes with ``FormRequest``. + +**Please also note that link extractors do not canonicalize URLs by default +anymore.** This was puzzling users every now and then, and it's not what +browsers do in fact, so we removed that extra transformation on extracted +links. + +For those of you wanting more control on the ``Referer:`` header that Scrapy +sends when following links, you can set your own ``Referrer Policy``. +Prior to Scrapy 1.4, the default ``RefererMiddleware`` would simply and +blindly set it to the URL of the response that generated the HTTP request +(which could leak information on your URL seeds). +By default, Scrapy now behaves much like your regular browser does. +And this policy is fully customizable with W3C standard values +(or with something really custom of your own if you wish). +See :setting:`REFERRER_POLICY` for details. + +To make Scrapy spiders easier to debug, Scrapy logs more stats by default +in 1.4: memory usage stats, detailed retry stats, detailed HTTP error code +stats. A similar change is that HTTP cache path is also visible in logs now. + +Last but not least, Scrapy now has the option to make JSON and XML items +more human-readable, with newlines between items and even custom indenting +offset, using the new :setting:`FEED_EXPORT_INDENT` setting. + +Enjoy! (Or read on for the rest of changes in this release.) + +Deprecations and Backward Incompatible Changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +- Default to ``canonicalize=False`` in + :class:`scrapy.linkextractors.LinkExtractor + ` + (:issue:`2537`, fixes :issue:`1941` and :issue:`1982`): + **warning, this is technically backward-incompatible** +- Enable memusage extension by default (:issue:`2539`, fixes :issue:`2187`); + **this is technically backward-incompatible** so please check if you have + any non-default ``MEMUSAGE_***`` options set. +- ``EDITOR`` environment variable now takes precedence over ``EDITOR`` + option defined in settings.py (:issue:`1829`); Scrapy default settings + no longer depend on environment variables. **This is technically a backward + incompatible change**. +- ``Spider.make_requests_from_url`` is deprecated + (:issue:`1728`, fixes :issue:`1495`). 
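+
+  A minimal sketch of the replacement (the spider name and URL are
+  hypothetical): override ``start_requests()`` and create the request
+  objects explicitly instead of relying on ``make_requests_from_url``::
+
+      import scrapy
+
+      class ExampleSpider(scrapy.Spider):
+          name = "example"  # hypothetical spider name
+          start_urls = ["https://example.com/"]
+
+          def start_requests(self):
+              # make_requests_from_url() created requests with
+              # dont_filter=True; keep it if you rely on that behavior.
+              for url in self.start_urls:
+                  yield scrapy.Request(url, callback=self.parse,
+                                       dont_filter=True)
+
+          def parse(self, response):
+              self.logger.info("Got %s", response.url)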
+ +New Features +~~~~~~~~~~~~ + +- Accept proxy credentials in :reqmeta:`proxy` request meta key (:issue:`2526`) +- Support `brotli-compressed`_ content; requires optional `brotlipy`_ + (:issue:`2535`) +- New :ref:`response.follow ` shortcut + for creating requests (:issue:`1940`) +- Added ``flags`` argument and attribute to :class:`Request ` + objects (:issue:`2047`) +- Support Anonymous FTP (:issue:`2342`) +- Added ``retry/count``, ``retry/max_reached`` and ``retry/reason_count/`` + stats to :class:`RetryMiddleware ` + (:issue:`2543`) +- Added ``httperror/response_ignored_count`` and ``httperror/response_ignored_status_count/`` + stats to :class:`HttpErrorMiddleware ` + (:issue:`2566`) +- Customizable :setting:`Referrer policy ` in + :class:`RefererMiddleware ` + (:issue:`2306`) +- New ``data:`` URI download handler (:issue:`2334`, fixes :issue:`2156`) +- Log cache directory when HTTP Cache is used (:issue:`2611`, fixes :issue:`2604`) +- Warn users when project contains duplicate spider names (fixes :issue:`2181`) +- ``scrapy.utils.datatypes.CaselessDict`` now accepts ``Mapping`` instances and + not only dicts (:issue:`2646`) +- :ref:`Media downloads `, with + :class:`~scrapy.pipelines.files.FilesPipeline` or + :class:`~scrapy.pipelines.images.ImagesPipeline`, can now optionally handle + HTTP redirects using the new :setting:`MEDIA_ALLOW_REDIRECTS` setting + (:issue:`2616`, fixes :issue:`2004`) +- Accept non-complete responses from websites using a new + :setting:`DOWNLOAD_FAIL_ON_DATALOSS` setting (:issue:`2590`, fixes :issue:`2586`) +- Optional pretty-printing of JSON and XML items via + :setting:`FEED_EXPORT_INDENT` setting (:issue:`2456`, fixes :issue:`1327`) +- Allow dropping fields in ``FormRequest.from_response`` formdata when + ``None`` value is passed (:issue:`667`) +- Per-request retry times with the new :reqmeta:`max_retry_times` meta key + (:issue:`2642`) +- ``python -m scrapy`` as a more explicit alternative to ``scrapy`` command + (:issue:`2740`) + +.. _brotli-compressed: https://www.ietf.org/rfc/rfc7932.txt +.. _brotlipy: https://github.com/python-hyper/brotlipy/ + +Bug fixes +~~~~~~~~~ + +- LinkExtractor now strips leading and trailing whitespaces from attributes + (:issue:`2547`, fixes :issue:`1614`) +- Properly handle whitespaces in action attribute in + :class:`~scrapy.http.FormRequest` (:issue:`2548`) +- Buffer CONNECT response bytes from proxy until all HTTP headers are received + (:issue:`2495`, fixes :issue:`2491`) +- FTP downloader now works on Python 3, provided you use Twisted>=17.1 + (:issue:`2599`) +- Use body to choose response type after decompressing content (:issue:`2393`, + fixes :issue:`2145`) +- Always decompress ``Content-Encoding: gzip`` at :class:`HttpCompressionMiddleware + ` stage (:issue:`2391`) +- Respect custom log level in ``Spider.custom_settings`` (:issue:`2581`, + fixes :issue:`1612`) +- 'make htmlview' fix for macOS (:issue:`2661`) +- Remove "commands" from the command list (:issue:`2695`) +- Fix duplicate Content-Length header for POST requests with empty body (:issue:`2677`) +- Properly cancel large downloads, i.e. 
above :setting:`DOWNLOAD_MAXSIZE` (:issue:`1616`)
+- ImagesPipeline: fixed processing of transparent PNG images with a palette
+  (:issue:`2675`)
+
+Cleanups & Refactoring
+~~~~~~~~~~~~~~~~~~~~~~
+
+- Tests: remove temp files and folders (:issue:`2570`),
+  fixed ProjectUtilsTest on macOS (:issue:`2569`),
+  use portable PyPy for Linux on Travis CI (:issue:`2710`)
+- Separate building request from ``_requests_to_follow`` in CrawlSpider (:issue:`2562`)
+- Remove “Python 3 progress” badge (:issue:`2567`)
+- Add a couple more lines to ``.gitignore`` (:issue:`2557`)
+- Remove bumpversion prerelease configuration (:issue:`2159`)
+- Add a ``codecov.yml`` file (:issue:`2750`)
+- Set context factory implementation based on Twisted version (:issue:`2577`,
+  fixes :issue:`2560`)
+- Add omitted ``self`` arguments in the default project middleware template (:issue:`2595`)
+- Remove redundant ``slot.add_request()`` call in ExecutionEngine (:issue:`2617`)
+- Catch a more specific ``os.error`` exception in
+  ``scrapy.pipelines.files.FSFilesStore`` (:issue:`2644`)
+- Change "localhost" test server certificate (:issue:`2720`)
+- Remove unused ``MEMUSAGE_REPORT`` setting (:issue:`2576`)
+
+Documentation
+~~~~~~~~~~~~~
+
+- Binary mode is required for exporters (:issue:`2564`, fixes :issue:`2553`)
+- Mention issue with :meth:`FormRequest.from_response <scrapy.http.FormRequest.from_response>` due to a bug in lxml (:issue:`2572`)
+- Use single quotes uniformly in templates (:issue:`2596`)
+- Document :reqmeta:`ftp_user` and :reqmeta:`ftp_password` meta keys (:issue:`2587`)
+- Removed section on deprecated ``contrib/`` (:issue:`2636`)
+- Recommend Anaconda when installing Scrapy on Windows
+  (:issue:`2477`, fixes :issue:`2475`)
+- FAQ: rewrite note on Python 3 support on Windows (:issue:`2690`)
+- Rearrange selector sections (:issue:`2705`)
+- Remove ``__nonzero__`` from :class:`~scrapy.selector.SelectorList`
+  docs (:issue:`2683`)
+- Mention how to disable request filtering in the documentation of the
+  :setting:`DUPEFILTER_CLASS` setting (:issue:`2714`)
+- Add ``sphinx_rtd_theme`` to the docs setup readme (:issue:`2668`)
+- Open file in text mode in the JSON item writer example (:issue:`2729`)
+- Clarify the ``allowed_domains`` example (:issue:`2670`)
+
+
+.. _release-1.3.3:
+
+Scrapy 1.3.3 (2017-03-10)
+-------------------------
+
+Bug fixes
+~~~~~~~~~
+
+- Make ``SpiderLoader`` raise ``ImportError`` again by default for missing
+  dependencies and a wrong :setting:`SPIDER_MODULES`.
+  These exceptions had been silenced as warnings since 1.3.0.
+  A new setting lets you toggle between warning and exception if needed;
+  see :setting:`SPIDER_LOADER_WARN_ONLY` for details.
+
+.. _release-1.3.2:
+
+Scrapy 1.3.2 (2017-02-13)
+-------------------------
+
+Bug fixes
+~~~~~~~~~
+
+- Preserve request class when converting to/from dicts (utils.reqser) (:issue:`2510`).
+- Use consistent selectors for the author field in the tutorial (:issue:`2551`).
+- Fix TLS compatibility with Twisted 17+ (:issue:`2558`)
+
+.. _release-1.3.1:
+
+Scrapy 1.3.1 (2017-02-08)
+-------------------------
+
+New features
+~~~~~~~~~~~~
+
+- Support ``'True'`` and ``'False'`` string values for boolean settings (:issue:`2519`);
+  you can now do something like ``scrapy crawl myspider -s REDIRECT_ENABLED=False``.
+- Support kwargs with ``response.xpath()`` to use XPath variables
+  and ad-hoc namespace declarations;
+  this requires at least Parsel v1.1 (:issue:`2457`). See the short example
+  after this list.
+- Add support for Python 3.6 (:issue:`2485`).
+- Run tests on PyPy (warning: some tests still fail, so PyPy is not supported yet).
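+
+A short, hypothetical example of both keyword-argument forms (the element
+class below is a placeholder, and the Atom namespace is used purely for
+illustration)::
+
+    # Bind the $cls XPath variable to the "cls" keyword argument.
+    response.xpath('//div[@class=$cls]/a/text()', cls='quote').extract()
+
+    # Declare an ad-hoc namespace prefix for this query only.
+    response.xpath('//atom:link/@href',
+                   namespaces={'atom': 'http://www.w3.org/2005/Atom'}).extract()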
+
+Bug fixes
+~~~~~~~~~
+
+- Enforce ``DNS_TIMEOUT`` setting (:issue:`2496`).
+- Fix the :command:`view` command; it was a regression in v1.3.0 (:issue:`2503`).
+- Fix tests regarding ``*_EXPIRES`` settings with the Files/Images pipelines (:issue:`2460`).
+- Fix the name of the generated pipeline class when using the basic project template (:issue:`2466`).
+- Fix compatibility with Twisted 17+ (:issue:`2496`, :issue:`2528`).
+- Fix ``scrapy.Item`` inheritance on Python 3.6 (:issue:`2511`).
+- Enforce numeric values for component order in ``SPIDER_MIDDLEWARES``,
+  ``DOWNLOADER_MIDDLEWARES``, ``EXTENSIONS`` and ``SPIDER_CONTRACTS`` (:issue:`2420`).
+
+Documentation
+~~~~~~~~~~~~~
+
+- Reword the Code of Conduct section and upgrade to Contributor Covenant v1.4
+  (:issue:`2469`).
+- Clarify that passing spider arguments converts them to spider attributes
+  (:issue:`2483`).
+- Document the ``formid`` argument on ``FormRequest.from_response()`` (:issue:`2497`).
+- Add the ``.rst`` extension to README files (:issue:`2507`).
+- Mention the LevelDB cache storage backend (:issue:`2525`).
+- Use ``yield`` in sample callback code (:issue:`2533`).
+- Add a note about HTML entity decoding with ``.re()/.re_first()`` (:issue:`1704`).
+- Typos (:issue:`2512`, :issue:`2534`, :issue:`2531`).
+
+Cleanups
+~~~~~~~~
+
+- Remove a redundant check in ``MetaRefreshMiddleware`` (:issue:`2542`).
+- Faster checks in ``LinkExtractor`` for allow/deny patterns (:issue:`2538`).
+- Remove dead code supporting old Twisted versions (:issue:`2544`).
+
+
+.. _release-1.3.0:
+
+Scrapy 1.3.0 (2016-12-21)
+-------------------------
+
+This release comes rather soon after 1.2.2 for one main reason:
+it turned out that releases from 0.18 up to and including 1.2.2 use
+some code backported from Twisted (``scrapy.xlib.tx.*``),
+even when newer Twisted modules are available.
+Scrapy now uses ``twisted.web.client`` and ``twisted.internet.endpoints`` directly.
+(See also the cleanups below.)
+
+As this is a major change, we wanted to get the fix out quickly
+while not breaking any projects using the 1.2 series.
+
+New Features
+~~~~~~~~~~~~
+
+- ``MailSender`` now accepts single strings as values for the ``to`` and ``cc``
+  arguments (:issue:`2272`)
+- ``scrapy fetch url``, ``scrapy shell url`` and ``fetch(url)`` inside the
+  Scrapy shell now follow HTTP redirections by default (:issue:`2290`);
+  see :command:`fetch` and :command:`shell` for details.
+- ``HttpErrorMiddleware`` now logs errors at ``INFO`` level instead of ``DEBUG``;
+  this is technically **backward incompatible**, so please check your log parsers.
+- By default, logger names now use a long-form path, e.g. ``[scrapy.extensions.logstats]``,
+  instead of the shorter "top-level" variant of prior releases (e.g. ``[scrapy]``);
+  this is **backward incompatible** if you have log parsers expecting the short
+  logger name part. You can switch back to short logger names by setting
+  :setting:`LOG_SHORT_NAMES` to ``True``.
+
+Dependencies & Cleanups
+~~~~~~~~~~~~~~~~~~~~~~~
+
+- Scrapy now requires Twisted >= 13.1, which many Linux
+  distributions already ship.
+- As a consequence, we got rid of the ``scrapy.xlib.tx.*`` modules, which
+  copied some of Twisted's code for users stuck with an "old" Twisted version.
+- ``ChunkedTransferMiddleware`` is deprecated and removed from the default
+  downloader middlewares.
+
+.. _release-1.2.3:
+
+Scrapy 1.2.3 (2017-03-03)
+-------------------------
+
+- Packaging fix: disallow unsupported Twisted versions in ``setup.py``
+
+
+.. 
_release-1.2.2:
+
+Scrapy 1.2.2 (2016-12-06)
+-------------------------
+
+Bug fixes
+~~~~~~~~~
+
+- Fix a cryptic traceback when a pipeline fails on ``open_spider()`` (:issue:`2011`)
+- Fix embedded IPython shell variables (:issue:`396`, which re-appeared
+  in 1.2.0; fixed in :issue:`2418`)
+- A couple of patches when dealing with robots.txt:
+
+  - handle (non-standard) relative sitemap URLs (:issue:`2390`)
+  - handle non-ASCII URLs and User-Agents in Python 2 (:issue:`2373`)
+
+Documentation
+~~~~~~~~~~~~~
+
+- Document the ``"download_latency"`` key in ``Request``'s ``meta`` dict (:issue:`2033`)
+- Remove page on (deprecated & unsupported) Ubuntu packages from the ToC (:issue:`2335`)
+- A few fixed typos (:issue:`2346`, :issue:`2369`, :issue:`2380`)
+  and clarifications (:issue:`2354`, :issue:`2325`, :issue:`2414`)
+
+Other changes
+~~~~~~~~~~~~~
+
+- Advertise `conda-forge`_ as Scrapy's official conda channel (:issue:`2387`)
+- More helpful error messages when trying to use ``.css()`` or ``.xpath()``
+  on non-text responses (:issue:`2264`)
+- The ``startproject`` command now generates a sample ``middlewares.py`` file (:issue:`2335`)
+- Include version info for more dependencies in ``scrapy version`` verbose output (:issue:`2404`)
+- Remove all ``*.pyc`` files from the source distribution (:issue:`2386`)
+
+.. _conda-forge: https://anaconda.org/conda-forge/scrapy
+
+
+.. _release-1.2.1:
+
+Scrapy 1.2.1 (2016-10-21)
+-------------------------
+
+Bug fixes
+~~~~~~~~~
+
+- Include OpenSSL's more permissive default ciphers when establishing
+  TLS/SSL connections (:issue:`2314`).
+- Fix "Location" HTTP header decoding on non-ASCII URL redirects (:issue:`2321`).
+
+Documentation
+~~~~~~~~~~~~~
+
+- Fix the JsonWriterPipeline example (:issue:`2302`).
+- Various notes: :issue:`2330` on spider names,
+  :issue:`2329` on middleware method processing order,
+  :issue:`2327` on getting multi-valued HTTP headers as lists.
+
+Other changes
+~~~~~~~~~~~~~
+
+- Removed ``www.`` from ``start_urls`` in built-in spider templates (:issue:`2299`).
+
+
+.. _release-1.2.0:
+
+Scrapy 1.2.0 (2016-10-03)
+-------------------------
+
+New Features
+~~~~~~~~~~~~
+
+- New :setting:`FEED_EXPORT_ENCODING` setting to customize the encoding
+  used when writing items to a file.
+  This can be used to turn off ``\uXXXX`` escapes in JSON output.
+  It is also useful for those wanting something other than UTF-8
+  for XML or CSV output (:issue:`2034`).
+- The ``startproject`` command now supports an optional destination directory
+  to override the default one based on the project name (:issue:`2005`).
+- New :setting:`SCHEDULER_DEBUG` setting to log request serialization
+  failures (:issue:`1610`).
+- The JSON encoder now supports serialization of ``set`` instances (:issue:`2058`).
+- Interpret ``application/json-amazonui-streaming`` as ``TextResponse`` (:issue:`1503`).
+- ``scrapy`` is imported by default when using shell tools (:command:`shell`,
+  ``inspect_response``) (:issue:`2248`).
+
+Bug fixes
+~~~~~~~~~
+
+- The DefaultRequestHeaders middleware now runs before the UserAgent middleware
+  (:issue:`2088`). **Warning: this is technically backward incompatible**,
+  though we consider this a bug fix.
+- The HTTP cache extension and plugins that use the ``.scrapy`` data directory now
+  work outside projects (:issue:`1581`). **Warning: this is technically
+  backward incompatible**, though we consider this a bug fix.
+- ``Selector`` does not allow passing both ``response`` and ``text`` anymore
+  (:issue:`2153`).
+- Fixed logging of the wrong callback name with ``scrapy parse`` (:issue:`2169`).
+- Fix for an odd gzip decompression bug (:issue:`1606`).
+- Fix for selected callbacks when using ``CrawlSpider`` with the :command:`parse` command
+  (:issue:`2225`).
+- Fix for invalid JSON and XML files when the spider yields no items (:issue:`872`).
+- Implement ``flush()`` for ``StreamLogger``, avoiding a warning in the logs (:issue:`2125`).
+
+Refactoring
+~~~~~~~~~~~
+
+- ``canonicalize_url`` has been moved to `w3lib.url`_ (:issue:`2168`).
+
+.. _w3lib.url: https://w3lib.readthedocs.io/en/latest/w3lib.html#w3lib.url.canonicalize_url
+
+Tests & Requirements
+~~~~~~~~~~~~~~~~~~~~
+
+Scrapy's new requirements baseline is Debian 8 "Jessie". It was previously
+Ubuntu 12.04 Precise.
+In practice this means that we run continuous integration tests with at least
+these (main) package versions:
+Twisted 14.0, pyOpenSSL 0.14, lxml 3.4.
+
+Scrapy may very well work with older versions of these packages
+(the code base still has switches for older Twisted versions, for example)
+but this is not guaranteed (because it is no longer tested).
+
+Documentation
+~~~~~~~~~~~~~
+
+- Grammar fixes: :issue:`2128`, :issue:`1566`.
+- Download stats badge removed from README (:issue:`2160`).
+- New Scrapy :ref:`architecture diagram <topics-architecture>` (:issue:`2165`).
+- Updated ``Response`` parameters documentation (:issue:`2197`).
+- Reworded misleading :setting:`RANDOMIZE_DOWNLOAD_DELAY` description (:issue:`2190`).
+- Add StackOverflow as a support channel (:issue:`2257`).
+
+.. _release-1.1.4:
+
+Scrapy 1.1.4 (2017-03-03)
+-------------------------
+
+- Packaging fix: disallow unsupported Twisted versions in ``setup.py``
+
+.. _release-1.1.3:
+
+Scrapy 1.1.3 (2016-09-22)
+-------------------------
+
+Bug fixes
+~~~~~~~~~
+
+- Class attributes for subclasses of ``ImagesPipeline`` and ``FilesPipeline``
+  work as they did before 1.1.1 (:issue:`2243`, fixes :issue:`2198`)
+
+Documentation
+~~~~~~~~~~~~~
+
+- :ref:`Overview <intro-overview>` and :ref:`tutorial <intro-tutorial>`
+  rewritten to use the http://toscrape.com websites
+  (:issue:`2236`, :issue:`2249`, :issue:`2252`).
+
+.. _release-1.1.2:
+
+Scrapy 1.1.2 (2016-08-18)
+-------------------------
+
+Bug fixes
+~~~~~~~~~
+
+- Introduce a missing :setting:`IMAGES_STORE_S3_ACL` setting to override
+  the default ACL policy in ``ImagesPipeline`` when uploading images to S3
+  (note that the default ACL policy is "private" -- instead of "public-read" --
+  since Scrapy 1.1.0)
+- :setting:`IMAGES_EXPIRES` default value set back to 90
+  (the regression was introduced in 1.1.1)
+
+.. 
_release-1.1.1: + +Scrapy 1.1.1 (2016-07-13) +------------------------- + +Bug fixes +~~~~~~~~~ + +- Add "Host" header in CONNECT requests to HTTPS proxies (:issue:`2069`) +- Use response ``body`` when choosing response class + (:issue:`2001`, fixes :issue:`2000`) +- Do not fail on canonicalizing URLs with wrong netlocs + (:issue:`2038`, fixes :issue:`2010`) +- a few fixes for ``HttpCompressionMiddleware`` (and ``SitemapSpider``): + + - Do not decode HEAD responses (:issue:`2008`, fixes :issue:`1899`) + - Handle charset parameter in gzip Content-Type header + (:issue:`2050`, fixes :issue:`2049`) + - Do not decompress gzip octet-stream responses + (:issue:`2065`, fixes :issue:`2063`) + +- Catch (and ignore with a warning) exception when verifying certificate + against IP-address hosts (:issue:`2094`, fixes :issue:`2092`) +- Make ``FilesPipeline`` and ``ImagesPipeline`` backward compatible again + regarding the use of legacy class attributes for customization + (:issue:`1989`, fixes :issue:`1985`) + + +New features +~~~~~~~~~~~~ + +- Enable genspider command outside project folder (:issue:`2052`) +- Retry HTTPS CONNECT ``TunnelError`` by default (:issue:`1974`) + + +Documentation +~~~~~~~~~~~~~ + +- ``FEED_TEMPDIR`` setting at lexicographical position (:commit:`9b3c72c`) +- Use idiomatic ``.extract_first()`` in overview (:issue:`1994`) +- Update years in copyright notice (:commit:`c2c8036`) +- Add information and example on errbacks (:issue:`1995`) +- Use "url" variable in downloader middleware example (:issue:`2015`) +- Grammar fixes (:issue:`2054`, :issue:`2120`) +- New FAQ entry on using BeautifulSoup in spider callbacks (:issue:`2048`) +- Add notes about Scrapy not working on Windows with Python 3 (:issue:`2060`) +- Encourage complete titles in pull requests (:issue:`2026`) + +Tests +~~~~~ + +- Upgrade py.test requirement on Travis CI and Pin pytest-cov to 2.2.1 (:issue:`2095`) + +.. _release-1.1.0: + +Scrapy 1.1.0 (2016-05-11) +------------------------- + +This 1.1 release brings a lot of interesting features and bug fixes: + +- Scrapy 1.1 has beta Python 3 support (requires Twisted >= 15.5). See + :ref:`news_betapy3` for more details and some limitations. +- Hot new features: + + - Item loaders now support nested loaders (:issue:`1467`). + - ``FormRequest.from_response`` improvements (:issue:`1382`, :issue:`1137`). + - Added setting :setting:`AUTOTHROTTLE_TARGET_CONCURRENCY` and improved + AutoThrottle docs (:issue:`1324`). + - Added ``response.text`` to get body as unicode (:issue:`1730`). + - Anonymous S3 connections (:issue:`1358`). + - Deferreds in downloader middlewares (:issue:`1473`). This enables better + robots.txt handling (:issue:`1471`). + - HTTP caching now follows RFC2616 more closely, added settings + :setting:`HTTPCACHE_ALWAYS_STORE` and + :setting:`HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS` (:issue:`1151`). + - Selectors were extracted to the parsel_ library (:issue:`1409`). This means + you can use Scrapy Selectors without Scrapy and also upgrade the + selectors engine without needing to upgrade Scrapy. + - HTTPS downloader now does TLS protocol negotiation by default, + instead of forcing TLS 1.0. You can also set the SSL/TLS method + using the new :setting:`DOWNLOADER_CLIENT_TLS_METHOD`. + +- These bug fixes may require your attention: + + - Don't retry bad requests (HTTP 400) by default (:issue:`1289`). + If you need the old behavior, add ``400`` to :setting:`RETRY_HTTP_CODES`. + - Fix shell files argument handling (:issue:`1710`, :issue:`1550`). 
+
+    If you try ``scrapy shell index.html``, it will try to load the URL http://index.html;
+    use ``scrapy shell ./index.html`` to load a local file.
+  - Robots.txt compliance is now enabled by default for newly-created projects
+    (:issue:`1724`). Scrapy will also wait for robots.txt to be downloaded
+    before proceeding with the crawl (:issue:`1735`). If you want to disable
+    this behavior, update :setting:`ROBOTSTXT_OBEY` in the ``settings.py`` file
+    after creating a new project.
+  - Exporters now work on unicode instead of bytes by default (:issue:`1080`).
+    If you use :class:`~scrapy.exporters.PythonItemExporter`, you may want to
+    update your code to disable binary mode, which is now deprecated.
+  - Accept XML node names containing dots as valid (:issue:`1533`).
+  - When uploading files or images to S3 (with ``FilesPipeline`` or
+    ``ImagesPipeline``), the default ACL policy is now "private" instead
+    of "public". **Warning: backward incompatible!**
+    You can use :setting:`FILES_STORE_S3_ACL` to change it.
+  - We've reimplemented ``canonicalize_url()`` for more correct output,
+    especially for URLs with non-ASCII characters (:issue:`1947`).
+    This could change link extractor output compared to previous Scrapy versions.
+    It may also invalidate some cache entries left over from pre-1.1 runs.
+    **Warning: backward incompatible!**
+
+Keep reading for more details on other improvements and bug fixes.
+
+.. _news_betapy3:
+
+Beta Python 3 Support
+~~~~~~~~~~~~~~~~~~~~~
+
+We have been hard at work to make Scrapy run on Python 3. As a result,
+you can now run spiders on Python 3.3, 3.4 and 3.5 (Twisted >= 15.5 required). Some
+features are still missing (and some may never be ported).
+
+Almost all built-in extensions/middlewares are expected to work.
+However, we are aware of some limitations in Python 3:
+
+- Scrapy does not work on Windows with Python 3
+- Sending emails is not supported
+- The FTP download handler is not supported
+- The Telnet console is not supported
+
+Additional New Features and Enhancements
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+- Scrapy now has a `Code of Conduct`_ (:issue:`1681`).
+- The command line tool now has completion for zsh (:issue:`934`).
+- Improvements to ``scrapy shell``:
+
+  - Support for bpython, and the ability to configure the preferred Python shell via
+    ``SCRAPY_PYTHON_SHELL`` (:issue:`1100`, :issue:`1444`).
+  - Support URLs without a scheme (:issue:`1498`)
+    **Warning: backward incompatible!**
+  - Bring back support for relative file paths (:issue:`1710`, :issue:`1550`).
+
+- Added the :setting:`MEMUSAGE_CHECK_INTERVAL_SECONDS` setting to change the default check
+  interval (:issue:`1282`).
+- Download handlers are now lazy-loaded on first request using their
+  scheme (:issue:`1390`, :issue:`1421`).
+- HTTPS download handlers do not force TLS 1.0 anymore; instead,
+  OpenSSL's ``SSLv23_method()/TLS_method()`` is used, which tries to negotiate
+  the highest TLS protocol version the remote host supports
+  (:issue:`1794`, :issue:`1629`).
+- ``RedirectMiddleware`` now skips the status codes listed in the
+  ``handle_httpstatus_list`` spider attribute
+  or in ``Request``'s ``meta`` key (:issue:`1334`, :issue:`1364`,
+  :issue:`1447`).
+- Form submission:
+
+  - now works with ``