From 9505c444f3d1ba50e3aa58c01dd1385fdf3cd731 Mon Sep 17 00:00:00 2001
From: Jerry Liu <jerryjliu98@gmail.com>
Date: Mon, 10 Jul 2023 17:00:07 -0700
Subject: [PATCH] make docs great again (#6823)

---
 docs/_static/getting_started/indexing.jpg     | Bin 0 -> 27552 bytes
 docs/_static/getting_started/querying.jpg     | Bin 0 -> 50148 bytes
 docs/_static/getting_started/rag.jpg          | Bin 0 -> 36897 bytes
 docs/_static/storage/storage.png              | Bin 32228 -> 41066 bytes
 .../callbacks.rst                             |   0
 .../composability.rst                         |   0
 .../example_notebooks.rst                     |   0
 docs/api_reference/index.rst                  |  27 ++
 docs/{reference => api_reference}/indices.rst |   0
 .../indices/empty.rst                         |   0
 .../indices/kg.rst                            |   0
 .../indices/list.rst                          |   0
 .../indices/struct_store.rst                  |   0
 .../indices/table.rst                         |   0
 .../indices/tree.rst                          |   0
 .../indices/vector_store.rst                  |   0
 .../langchain_integrations/base.rst           |   0
 .../llm_predictor.rst                         |   0
 docs/{reference => api_reference}/llms.rst    |   0
 .../llms/azure_openai.rst                     |   0
 .../llms/huggingface.rst                      |   0
 .../llms/langchain.rst                        |   0
 .../llms/openai.rst                           |   0
 docs/{reference => api_reference}/node.rst    |   0
 .../node_postprocessor.rst                    |   0
 .../playground.rst                            |   0
 docs/{reference => api_reference}/prompts.rst |   0
 docs/{reference => api_reference}/query.rst   |   0
 .../query/chat_engines.rst                    |   0
 .../condense_question_chat_engine.rst         |   0
 .../query/chat_engines/react_chat_engine.rst  |   0
 .../query/chat_engines/simple_chat_engine.rst |   0
 .../query/query_bundle.rst                    |   0
 .../query/query_engines.rst                   |   0
 .../query_engines/citation_query_engine.rst   |   6 +
 .../query_engines/flare_query_engine.rst      |   6 +
 .../query_engines/graph_query_engine.rst      |   0
 .../query_engines/multistep_query_engine.rst  |   0
 .../query_engines/pandas_query_engine.rst     |   0
 .../query_engines/retriever_query_engine.rst  |   0
 .../retriever_router_query_engine.rst         |   6 +
 .../query_engines/router_query_engine.rst     |   3 +-
 .../query_engines/sql_join_query_engine.rst   |   6 +
 .../query/query_engines/sql_query_engine.rst  |   0
 .../sub_question_query_engine.rst             |   0
 .../query_engines/transform_query_engine.rst  |   0
 .../query/query_transform.rst                 |   0
 .../query/response_synthesizer.rst            |   8 +
 .../query/retrievers.rst                      |   0
 .../query/retrievers/empty.rst                |   0
 .../query/retrievers/kg.rst                   |   0
 .../query/retrievers/list.rst                 |   0
 .../query/retrievers/table.rst                |   0
 .../query/retrievers/transform.rst            |   0
 .../query/retrievers/tree.rst                 |   0
 .../query/retrievers/vector_store.rst         |   0
 docs/{reference => api_reference}/readers.rst |   0
 .../{reference => api_reference}/response.rst |   0
 .../service_context.rst                       |   4 +-
 .../service_context/embeddings.rst            |   0
 .../service_context}/node_parser.rst          |   0
 .../service_context/prompt_helper.rst         |   0
 docs/{reference => api_reference}/storage.rst |   0
 .../storage/docstore.rst                      |   0
 .../storage/index_store.rst                   |   0
 .../storage/indices_save_load.rst             |   0
 .../storage/kv_store.rst                      |   0
 .../storage/vector_store.rst                  |   0
 .../struct_store.rst                          |   0
 docs/{gallery => community}/app_showcase.md   |   2 +-
 docs/community/integrations.md                |  15 +
 .../integrations/chatgpt_plugins.md           |   0
 .../integrations/graphsignal.md               |   0
 .../integrations/guidance.md                  |   0
 .../integrations/trulens.md                   |   0
 .../integrations/using_with_langchain.md      |   2 +-
 .../integrations/vector_stores.md             |   2 +-
 .../data_modules/connector/modules.md         |  31 ++
 .../data_modules}/connector/root.md           |   7 +-
 .../data_modules}/connector/usage_pattern.md  |   0
 .../data_modules/documents_and_nodes/root.md  |  64 +++
 .../documents_and_nodes/usage_documents.md}   |  40 +-
 .../usage_metadata_extractor.md               |  43 ++
 .../documents_and_nodes/usage_nodes.md        |  35 ++
 .../data_modules}/index/composability.md      |   8 +-
 .../index/document_management.md              |   0
 .../data_modules/index}/index_guide.md        |   0
 .../index/index_progress_bars.ipynb           |   0
 .../index/metadata_extraction.md              |   0
 .../data_modules}/index/modules.md            |   7 +-
 .../data_modules}/index/root.md               |  17 +-
 .../data_modules}/index/usage_pattern.md      |   4 +-
 .../index/vector_store_guide.ipynb            |   0
 .../data_modules/node_parsers/root.md         |  24 ++
 .../node_parsers/usage_pattern.md             |  80 ++++
 .../data_modules}/storage/customization.md    |   2 +-
 .../data_modules}/storage/docstores.md        |   2 +-
 .../data_modules}/storage/index_stores.md     |   2 +-
 .../data_modules}/storage/kv_stores.md        |   4 +-
 .../core_modules/data_modules/storage/root.md |  91 +++++
 .../data_modules}/storage/save_load.md        |   2 +-
 .../data_modules/storage/vector_stores.md     |  65 +++
 .../model_modules/embeddings/modules.md       |  13 +
 .../model_modules/embeddings/root.md          |  42 ++
 .../model_modules/embeddings/usage_pattern.md | 101 +++++
 .../model_modules/llms/modules.md             |  42 ++
 docs/core_modules/model_modules/llms/root.md  |  49 +++
 .../model_modules/llms/usage_custom.md}       |  26 +-
 .../model_modules/llms/usage_standalone.md    |  35 ++
 .../model_modules/prompts.md}                 |  94 +++--
 .../query_modules/chat_engines}/modules.md    |   0
 .../query_modules/chat_engines}/root.md       |  16 +-
 .../chat_engines}/usage_pattern.md            |  23 +-
 .../node_postprocessors/modules.md            | 222 +++++++++++
 .../query_modules/node_postprocessors/root.md |  49 +++
 .../node_postprocessors/usage_pattern.md      |  93 +++++
 .../advanced/query_transformations.md         |   0
 .../query_modules}/query_engine/modules.md    |   0
 .../query_engine/response_modes.md            |   2 +-
 .../query_modules}/query_engine/root.md       |  20 +-
 .../query_modules/query_engine}/streaming.md  |  13 +-
 .../query_engine/supporting_modules.md        |   8 +
 .../query_engine/usage_pattern.md             |  25 +-
 .../response_synthesizers/modules.md          |  62 +++
 .../response_synthesizers/root.md             |  50 +++
 .../response_synthesizers/usage_pattern.md    |  95 +++++
 .../query_modules}/retriever/modules.md       |   2 +-
 .../retriever/retriever_modes.md              |   2 +-
 .../query_modules/retriever/root.md           |  37 ++
 .../query_modules}/retriever/usage_pattern.md |   8 +-
 .../structured_outputs/output_parser.md       |  13 +-
 .../structured_outputs/pydantic_program.md    |   0
 .../query_modules}/structured_outputs/root.md |   4 +-
 .../supporting_modules/callbacks/root.md      |  50 +++
 .../callbacks/token_counting_migration.md     |   2 +-
 .../supporting_modules/cost_analysis/root.md} |  89 ++---
 .../cost_analysis/usage_pattern.md            |  97 +++++
 .../supporting_modules/evaluation/modules.md  |  13 +
 .../supporting_modules/evaluation/root.md     |  64 +++
 .../evaluation/usage_pattern.md}              |  87 ++--
 .../supporting_modules/playground/root.md}    |  17 +-
 .../supporting_modules/service_context.md     | 103 +++++
 .../agents.md                                 |  10 +-
 docs/end_to_end_tutorials/apps.md             |  13 +
 .../apps}/fullstack_app_guide.md              |   0
 .../apps}/fullstack_with_delphic.md           |   0
 docs/end_to_end_tutorials/chatbots.md         |   7 +
 .../chatbots}/building_a_chatbot.md           |   0
 .../discover_llamaindex.md                    |   2 +-
 docs/end_to_end_tutorials/privacy.md          |   5 +
 .../question_and_answer.md}                   |  29 +-
 .../terms_definitions_tutorial.md             |   2 +-
 .../question_and_answer}/unified_query.md     |   2 +-
 docs/end_to_end_tutorials/structured_data.md  |   5 +
 .../structured_data}/Airbyte_demo.ipynb       |   0
 .../structured_data}/img/airbyte_1.png        | Bin
 .../structured_data}/img/airbyte_3.png        | Bin
 .../structured_data}/img/airbyte_6.png        | Bin
 .../structured_data}/img/airbyte_7.png        | Bin
 .../structured_data}/img/airbyte_8.png        | Bin
 .../structured_data}/img/airbyte_9.png        | Bin
 .../structured_data}/img/github_1.png         | Bin
 .../structured_data}/img/github_2.png         | Bin
 .../structured_data}/img/github_3.png         | Bin
 .../structured_data}/img/snowflake_1.png      | Bin
 .../structured_data}/img/snowflake_2.png      | Bin
 .../structured_data}/sql_guide.md             |   0
 .../usage_pattern.md                          |  30 +-
 docs/end_to_end_tutorials/use_cases.md        |  17 +
 docs/examples/analysis/PlaygroundDemo.ipynb   | 288 +++++---------
 docs/examples/analysis/TokenPredictor.ipynb   | 374 ------------------
 docs/examples/callbacks/AimCallback.ipynb     |   2 +-
 .../callbacks/LlamaDebugHandler.ipynb         |   2 +-
 .../callbacks/TokenCountingHandler.ipynb      |   2 +-
 .../callbacks/WandbCallbackHandler.ipynb      |  20 +-
 .../SimpleIndexDemo-Huggingface_camel.ipynb   |   3 +-
 ...SimpleIndexDemo-Huggingface_stablelm.ipynb |   3 +-
 .../customization/prompts/chat_prompts.ipynb  | 199 ++++++++++
 .../prompts/completion_prompts.ipynb          | 175 ++++++++
 docs/examples/embeddings/Langchain.ipynb      |  40 ++
 docs/examples/embeddings/OpenAI.ipynb         |  50 +++
 .../embeddings/custom_embeddings.ipynb        | 184 +++++++++
 .../evaluation/QuestionGeneration.ipynb       |   4 +-
 .../evaluation/TestNYC-Evaluation-Query.ipynb |   4 +-
 .../evaluation/TestNYC-Evaluation.ipynb       |   4 +-
 docs/examples/llm/palm.ipynb                  | 189 +++++++++
 .../refine.ipynb                              |   5 +-
 .../tree_summarize.ipynb                      |   5 +-
 docs/getting_started/FAQ.md                   |   1 +
 docs/getting_started/concepts.md              |  83 ++++
 docs/getting_started/customization.rst        | 181 +++++++++
 docs/getting_started/installation.md          |   4 +-
 docs/getting_started/starter_example.md       |  16 +-
 docs/guides/primer.rst                        |  21 -
 docs/guides/primer/query_interface.md         |  44 ---
 docs/guides/tutorials.rst                     |  24 --
 docs/how_to/analysis.rst                      |  16 -
 docs/how_to/callbacks.rst                     |  44 ---
 docs/how_to/connector/modules.md              |  31 --
 docs/how_to/customization.rst                 |  23 --
 docs/how_to/customization/embeddings.md       | 113 ------
 .../customization/llms_migration_guide.md     |  55 ---
 docs/how_to/customization/service_context.md  |  78 ----
 docs/how_to/integrations.rst                  |  17 -
 .../advanced/response_synthesis.md            |  39 --
 docs/how_to/query_engine/advanced/root.md     |  11 -
 .../query_engine/advanced/second_stage.md     | 216 ----------
 docs/how_to/retriever/root.md                 |  30 --
 docs/how_to/storage.rst                       |  39 --
 docs/how_to/storage/vector_stores.md          |  65 ---
 docs/index.rst                                | 102 +++--
 docs/reference/query/response_synthesizer.rst |  12 -
 .../service_context/llama_logger.rst          |   9 -
 .../service_context/llm_predictor.rst         |  17 -
 docs/use_cases/apps.md                        |  22 --
 .../indices/tree/all_leaf_retriever.py        |   4 +-
 .../indices/tree/tree_root_retriever.py       |   4 +-
 llama_index/llms/__init__.py                  |   2 +
 llama_index/llms/mock.py                      |  28 +-
 llama_index/playground/base.py                |  17 +-
 220 files changed, 3423 insertions(+), 1903 deletions(-)
 create mode 100644 docs/_static/getting_started/indexing.jpg
 create mode 100644 docs/_static/getting_started/querying.jpg
 create mode 100644 docs/_static/getting_started/rag.jpg
 rename docs/{reference => api_reference}/callbacks.rst (100%)
 rename docs/{reference => api_reference}/composability.rst (100%)
 rename docs/{reference => api_reference}/example_notebooks.rst (100%)
 create mode 100644 docs/api_reference/index.rst
 rename docs/{reference => api_reference}/indices.rst (100%)
 rename docs/{reference => api_reference}/indices/empty.rst (100%)
 rename docs/{reference => api_reference}/indices/kg.rst (100%)
 rename docs/{reference => api_reference}/indices/list.rst (100%)
 rename docs/{reference => api_reference}/indices/struct_store.rst (100%)
 rename docs/{reference => api_reference}/indices/table.rst (100%)
 rename docs/{reference => api_reference}/indices/tree.rst (100%)
 rename docs/{reference => api_reference}/indices/vector_store.rst (100%)
 rename docs/{reference => api_reference}/langchain_integrations/base.rst (100%)
 rename docs/{reference => api_reference}/llm_predictor.rst (100%)
 rename docs/{reference => api_reference}/llms.rst (100%)
 rename docs/{reference => api_reference}/llms/azure_openai.rst (100%)
 rename docs/{reference => api_reference}/llms/huggingface.rst (100%)
 rename docs/{reference => api_reference}/llms/langchain.rst (100%)
 rename docs/{reference => api_reference}/llms/openai.rst (100%)
 rename docs/{reference => api_reference}/node.rst (100%)
 rename docs/{reference => api_reference}/node_postprocessor.rst (100%)
 rename docs/{reference => api_reference}/playground.rst (100%)
 rename docs/{reference => api_reference}/prompts.rst (100%)
 rename docs/{reference => api_reference}/query.rst (100%)
 rename docs/{reference => api_reference}/query/chat_engines.rst (100%)
 rename docs/{reference => api_reference}/query/chat_engines/condense_question_chat_engine.rst (100%)
 rename docs/{reference => api_reference}/query/chat_engines/react_chat_engine.rst (100%)
 rename docs/{reference => api_reference}/query/chat_engines/simple_chat_engine.rst (100%)
 rename docs/{reference => api_reference}/query/query_bundle.rst (100%)
 rename docs/{reference => api_reference}/query/query_engines.rst (100%)
 create mode 100644 docs/api_reference/query/query_engines/citation_query_engine.rst
 create mode 100644 docs/api_reference/query/query_engines/flare_query_engine.rst
 rename docs/{reference => api_reference}/query/query_engines/graph_query_engine.rst (100%)
 rename docs/{reference => api_reference}/query/query_engines/multistep_query_engine.rst (100%)
 rename docs/{reference => api_reference}/query/query_engines/pandas_query_engine.rst (100%)
 rename docs/{reference => api_reference}/query/query_engines/retriever_query_engine.rst (100%)
 create mode 100644 docs/api_reference/query/query_engines/retriever_router_query_engine.rst
 rename docs/{reference => api_reference}/query/query_engines/router_query_engine.rst (51%)
 create mode 100644 docs/api_reference/query/query_engines/sql_join_query_engine.rst
 rename docs/{reference => api_reference}/query/query_engines/sql_query_engine.rst (100%)
 rename docs/{reference => api_reference}/query/query_engines/sub_question_query_engine.rst (100%)
 rename docs/{reference => api_reference}/query/query_engines/transform_query_engine.rst (100%)
 rename docs/{reference => api_reference}/query/query_transform.rst (100%)
 create mode 100644 docs/api_reference/query/response_synthesizer.rst
 rename docs/{reference => api_reference}/query/retrievers.rst (100%)
 rename docs/{reference => api_reference}/query/retrievers/empty.rst (100%)
 rename docs/{reference => api_reference}/query/retrievers/kg.rst (100%)
 rename docs/{reference => api_reference}/query/retrievers/list.rst (100%)
 rename docs/{reference => api_reference}/query/retrievers/table.rst (100%)
 rename docs/{reference => api_reference}/query/retrievers/transform.rst (100%)
 rename docs/{reference => api_reference}/query/retrievers/tree.rst (100%)
 rename docs/{reference => api_reference}/query/retrievers/vector_store.rst (100%)
 rename docs/{reference => api_reference}/readers.rst (100%)
 rename docs/{reference => api_reference}/response.rst (100%)
 rename docs/{reference => api_reference}/service_context.rst (90%)
 rename docs/{reference => api_reference}/service_context/embeddings.rst (100%)
 rename docs/{reference => api_reference/service_context}/node_parser.rst (100%)
 rename docs/{reference => api_reference}/service_context/prompt_helper.rst (100%)
 rename docs/{reference => api_reference}/storage.rst (100%)
 rename docs/{reference => api_reference}/storage/docstore.rst (100%)
 rename docs/{reference => api_reference}/storage/index_store.rst (100%)
 rename docs/{reference => api_reference}/storage/indices_save_load.rst (100%)
 rename docs/{reference => api_reference}/storage/kv_store.rst (100%)
 rename docs/{reference => api_reference}/storage/vector_store.rst (100%)
 rename docs/{reference => api_reference}/struct_store.rst (100%)
 rename docs/{gallery => community}/app_showcase.md (99%)
 create mode 100644 docs/community/integrations.md
 rename docs/{how_to => community}/integrations/chatgpt_plugins.md (100%)
 rename docs/{how_to => community}/integrations/graphsignal.md (100%)
 rename docs/{how_to => community}/integrations/guidance.md (100%)
 rename docs/{how_to => community}/integrations/trulens.md (100%)
 rename docs/{how_to => community}/integrations/using_with_langchain.md (97%)
 rename docs/{how_to => community}/integrations/vector_stores.md (99%)
 create mode 100644 docs/core_modules/data_modules/connector/modules.md
 rename docs/{how_to => core_modules/data_modules}/connector/root.md (80%)
 rename docs/{how_to => core_modules/data_modules}/connector/usage_pattern.md (100%)
 create mode 100644 docs/core_modules/data_modules/documents_and_nodes/root.md
 rename docs/{how_to/customization/custom_documents.md => core_modules/data_modules/documents_and_nodes/usage_documents.md} (86%)
 create mode 100644 docs/core_modules/data_modules/documents_and_nodes/usage_metadata_extractor.md
 create mode 100644 docs/core_modules/data_modules/documents_and_nodes/usage_nodes.md
 rename docs/{how_to => core_modules/data_modules}/index/composability.md (94%)
 rename docs/{how_to => core_modules/data_modules}/index/document_management.md (100%)
 rename docs/{guides/primer => core_modules/data_modules/index}/index_guide.md (100%)
 rename docs/{how_to => core_modules/data_modules}/index/index_progress_bars.ipynb (100%)
 rename docs/{how_to => core_modules/data_modules}/index/metadata_extraction.md (100%)
 rename docs/{how_to => core_modules/data_modules}/index/modules.md (68%)
 rename docs/{how_to => core_modules/data_modules}/index/root.md (58%)
 rename docs/{how_to => core_modules/data_modules}/index/usage_pattern.md (95%)
 rename docs/{how_to => core_modules/data_modules}/index/vector_store_guide.ipynb (100%)
 create mode 100644 docs/core_modules/data_modules/node_parsers/root.md
 create mode 100644 docs/core_modules/data_modules/node_parsers/usage_pattern.md
 rename docs/{how_to => core_modules/data_modules}/storage/customization.md (96%)
 rename docs/{how_to => core_modules/data_modules}/storage/docstores.md (97%)
 rename docs/{how_to => core_modules/data_modules}/storage/index_stores.md (97%)
 rename docs/{how_to => core_modules/data_modules}/storage/kv_stores.md (67%)
 create mode 100644 docs/core_modules/data_modules/storage/root.md
 rename docs/{how_to => core_modules/data_modules}/storage/save_load.md (97%)
 create mode 100644 docs/core_modules/data_modules/storage/vector_stores.md
 create mode 100644 docs/core_modules/model_modules/embeddings/modules.md
 create mode 100644 docs/core_modules/model_modules/embeddings/root.md
 create mode 100644 docs/core_modules/model_modules/embeddings/usage_pattern.md
 create mode 100644 docs/core_modules/model_modules/llms/modules.md
 create mode 100644 docs/core_modules/model_modules/llms/root.md
 rename docs/{how_to/customization/custom_llms.md => core_modules/model_modules/llms/usage_custom.md} (88%)
 create mode 100644 docs/core_modules/model_modules/llms/usage_standalone.md
 rename docs/{how_to/customization/custom_prompts.md => core_modules/model_modules/prompts.md} (59%)
 rename docs/{how_to/chat_engine => core_modules/query_modules/chat_engines}/modules.md (100%)
 rename docs/{how_to/chat_engine => core_modules/query_modules/chat_engines}/root.md (58%)
 rename docs/{how_to/chat_engine => core_modules/query_modules/chat_engines}/usage_pattern.md (77%)
 create mode 100644 docs/core_modules/query_modules/node_postprocessors/modules.md
 create mode 100644 docs/core_modules/query_modules/node_postprocessors/root.md
 create mode 100644 docs/core_modules/query_modules/node_postprocessors/usage_pattern.md
 rename docs/{how_to => core_modules/query_modules}/query_engine/advanced/query_transformations.md (100%)
 rename docs/{how_to => core_modules/query_modules}/query_engine/modules.md (100%)
 rename docs/{how_to => core_modules/query_modules}/query_engine/response_modes.md (92%)
 rename docs/{how_to => core_modules/query_modules}/query_engine/root.md (51%)
 rename docs/{how_to/customization => core_modules/query_modules/query_engine}/streaming.md (89%)
 create mode 100644 docs/core_modules/query_modules/query_engine/supporting_modules.md
 rename docs/{how_to => core_modules/query_modules}/query_engine/usage_pattern.md (70%)
 create mode 100644 docs/core_modules/query_modules/response_synthesizers/modules.md
 create mode 100644 docs/core_modules/query_modules/response_synthesizers/root.md
 create mode 100644 docs/core_modules/query_modules/response_synthesizers/usage_pattern.md
 rename docs/{how_to => core_modules/query_modules}/retriever/modules.md (91%)
 rename docs/{how_to => core_modules/query_modules}/retriever/retriever_modes.md (94%)
 create mode 100644 docs/core_modules/query_modules/retriever/root.md
 rename docs/{how_to => core_modules/query_modules}/retriever/usage_pattern.md (81%)
 rename docs/{how_to => core_modules/query_modules}/structured_outputs/output_parser.md (93%)
 rename docs/{how_to => core_modules/query_modules}/structured_outputs/pydantic_program.md (100%)
 rename docs/{how_to => core_modules/query_modules}/structured_outputs/root.md (93%)
 create mode 100644 docs/core_modules/supporting_modules/callbacks/root.md
 rename docs/{how_to => core_modules/supporting_modules}/callbacks/token_counting_migration.md (96%)
 rename docs/{how_to/analysis/cost_analysis.md => core_modules/supporting_modules/cost_analysis/root.md} (56%)
 create mode 100644 docs/core_modules/supporting_modules/cost_analysis/usage_pattern.md
 create mode 100644 docs/core_modules/supporting_modules/evaluation/modules.md
 create mode 100644 docs/core_modules/supporting_modules/evaluation/root.md
 rename docs/{how_to/evaluation/evaluation.md => core_modules/supporting_modules/evaluation/usage_pattern.md} (55%)
 rename docs/{how_to/analysis/playground.md => core_modules/supporting_modules/playground/root.md} (80%)
 create mode 100644 docs/core_modules/supporting_modules/service_context.md
 rename docs/{use_cases => end_to_end_tutorials}/agents.md (91%)
 create mode 100644 docs/end_to_end_tutorials/apps.md
 rename docs/{guides/tutorials => end_to_end_tutorials/apps}/fullstack_app_guide.md (100%)
 rename docs/{guides/tutorials => end_to_end_tutorials/apps}/fullstack_with_delphic.md (100%)
 create mode 100644 docs/end_to_end_tutorials/chatbots.md
 rename docs/{guides/tutorials => end_to_end_tutorials/chatbots}/building_a_chatbot.md (100%)
 rename docs/{guides/tutorials => end_to_end_tutorials}/discover_llamaindex.md (93%)
 create mode 100644 docs/end_to_end_tutorials/privacy.md
 rename docs/{use_cases/queries.md => end_to_end_tutorials/question_and_answer.md} (83%)
 rename docs/{guides/tutorials => end_to_end_tutorials/question_and_answer}/terms_definitions_tutorial.md (97%)
 rename docs/{guides/tutorials => end_to_end_tutorials/question_and_answer}/unified_query.md (99%)
 create mode 100644 docs/end_to_end_tutorials/structured_data.md
 rename docs/{guides/tutorials => end_to_end_tutorials/structured_data}/Airbyte_demo.ipynb (100%)
 rename docs/{guides/tutorials => end_to_end_tutorials/structured_data}/img/airbyte_1.png (100%)
 rename docs/{guides/tutorials => end_to_end_tutorials/structured_data}/img/airbyte_3.png (100%)
 rename docs/{guides/tutorials => end_to_end_tutorials/structured_data}/img/airbyte_6.png (100%)
 rename docs/{guides/tutorials => end_to_end_tutorials/structured_data}/img/airbyte_7.png (100%)
 rename docs/{guides/tutorials => end_to_end_tutorials/structured_data}/img/airbyte_8.png (100%)
 rename docs/{guides/tutorials => end_to_end_tutorials/structured_data}/img/airbyte_9.png (100%)
 rename docs/{guides/tutorials => end_to_end_tutorials/structured_data}/img/github_1.png (100%)
 rename docs/{guides/tutorials => end_to_end_tutorials/structured_data}/img/github_2.png (100%)
 rename docs/{guides/tutorials => end_to_end_tutorials/structured_data}/img/github_3.png (100%)
 rename docs/{guides/tutorials => end_to_end_tutorials/structured_data}/img/snowflake_1.png (100%)
 rename docs/{guides/tutorials => end_to_end_tutorials/structured_data}/img/snowflake_2.png (100%)
 rename docs/{guides/tutorials => end_to_end_tutorials/structured_data}/sql_guide.md (100%)
 rename docs/{guides/primer => end_to_end_tutorials}/usage_pattern.md (91%)
 create mode 100644 docs/end_to_end_tutorials/use_cases.md
 delete mode 100644 docs/examples/analysis/TokenPredictor.ipynb
 create mode 100644 docs/examples/customization/prompts/chat_prompts.ipynb
 create mode 100644 docs/examples/customization/prompts/completion_prompts.ipynb
 create mode 100644 docs/examples/embeddings/Langchain.ipynb
 create mode 100644 docs/examples/embeddings/OpenAI.ipynb
 create mode 100644 docs/examples/embeddings/custom_embeddings.ipynb
 create mode 100644 docs/examples/llm/palm.ipynb
 rename docs/examples/{response_builder => response_synthesizers}/refine.ipynb (97%)
 rename docs/examples/{response_builder => response_synthesizers}/tree_summarize.ipynb (96%)
 create mode 100644 docs/getting_started/FAQ.md
 create mode 100644 docs/getting_started/concepts.md
 create mode 100644 docs/getting_started/customization.rst
 delete mode 100644 docs/guides/primer.rst
 delete mode 100644 docs/guides/primer/query_interface.md
 delete mode 100644 docs/guides/tutorials.rst
 delete mode 100644 docs/how_to/analysis.rst
 delete mode 100644 docs/how_to/callbacks.rst
 delete mode 100644 docs/how_to/connector/modules.md
 delete mode 100644 docs/how_to/customization.rst
 delete mode 100644 docs/how_to/customization/embeddings.md
 delete mode 100644 docs/how_to/customization/llms_migration_guide.md
 delete mode 100644 docs/how_to/customization/service_context.md
 delete mode 100644 docs/how_to/integrations.rst
 delete mode 100644 docs/how_to/query_engine/advanced/response_synthesis.md
 delete mode 100644 docs/how_to/query_engine/advanced/root.md
 delete mode 100644 docs/how_to/query_engine/advanced/second_stage.md
 delete mode 100644 docs/how_to/retriever/root.md
 delete mode 100644 docs/how_to/storage.rst
 delete mode 100644 docs/how_to/storage/vector_stores.md
 delete mode 100644 docs/reference/query/response_synthesizer.rst
 delete mode 100644 docs/reference/service_context/llama_logger.rst
 delete mode 100644 docs/reference/service_context/llm_predictor.rst
 delete mode 100644 docs/use_cases/apps.md

diff --git a/docs/_static/getting_started/indexing.jpg b/docs/_static/getting_started/indexing.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8672967213caf28fb27682b6b9e2e5111beb9aaa
GIT binary patch
literal 27552
zcmex=<NpH&0WUXCHwH!~1_nk3Mh1rew;7xnIM~?O*;qN)+1WWcIk<R4czL+Fc_f8|
z`9)-<<mF_gWMmXn^wbrUbd+UeG|V-13=B<7Oyt!qZ7qy!^o&i6K!z}Ka&q!;^GNXW
zN*F21C>oIr{vTiv<X~9F+QrPM#K0uT$SlbC{|JLT0|O%~BN#A10V5ML3o9Et2PYTz
z|04`r1sIqZnVFebm|0m_SQr=>YZ;lC8CV2ag%k}P*@OcV*_8@Kj2b5{<WP3ncu+Lx
z;s+Juq@pHHE-`TlNhwt|bq!4|6H_yD3rj0!7gslT4^OY)kkGL3h{&kql+?8JjLfX!
zlG3vBipr|yme#iRj?S)0lc!9bHhsp-S&J4gS-Ncbij}K2ZQinV+x8thcO5!><mj>E
zCr+Nabot8FYu9hwy!G(W<0ns_J%91?)yGetzkL1n{m0K=Ab&A3FoS&sA|M_^^Oqn4
z6C)D~3o{El$X|?1<qV8W%z`YeiiT`Lj)Clng~CckjT|CQ6Blkg$f;}`^g%SK=pvVx
zipfLOk07sseMX$en#l4Q++zrT-D2QjW@KOzWENzwXZZe0V^IUwC?+!u*4%YIVx>2E
z{#)6&L+52(kDgNe>&uWC7~}f>=zj*rh$o(pv@1E@vgGU4iw0?Pz5dqp!F6BF;*~Lv
z|5Zja&j0<gm7!{>=o9%57py{$|EZ`*kKHM|zhtrfM3eeMZ?2~PXXwvwk9xCjj={gc
zSqxu)X`irvSXi<2_`7|g+3R1Q*;D^S*!_>5yy%KE`?*T5$nBi|pP}ah`}&J<j{h0B
zS4Z~$v#XPSy|Iq3eoj*VkN9l{^~b-rn%=IDzq|5ZN&rY*VD4|HA6mt~Pki;x|Mfa&
z`F{q-<mhSjk~Yj$4*mk}!d3n}6CZ!n?!VSxy-Qo!|6_Rj))-#<n9J*C{Ab`X_c;EE
z-E_SJt4JUMr@yYcH}~OPwOaqso!-?RZjrZlzF~Of`Gz$ky#0>Te+G$t8h0Yky#LR@
zF~5C&_M%CwXKpI5-^o7z-FgXK2VwOtJH;CRss9-s3OCl+Km2>tZDEmprN%c4`yVeJ
ze27}J{(yG$$7dDoM`ztzT2S}7lj+HSh6CSceZ8U3b&KOclS-Gw6(zSW(Gb1{#tR`+
z7$5&<_;AjS|3AZmX*>Tj)Ia>s@HnLYu*DDie@#)(|Ndua|Ig4Hz9G;-&i+3G&*KOG
z8GdnQ&;R~U{-3G)ALIV!{|qlw*VW(uQUBv=HLJ*|X+taA9PHEeIaZ#^s(Q8Ih0Djn
z{|pmWPO6dnDAzo@pZ&+{fTqx=x$h-r-1$B6__MaXU0n>H>UsVITs65{WzYS`X+x}e
zPz`tWQ;D{k55_mre;;^kBe3<g=$VT52g~FvK0LXVdu!Dl4gKuGzQoBJf629!%l%lF
z|KNSs`chw|&xd#2OkA50^mkV7bDbBQT%PY7Sv|k8?`5cph2|7S3rSOM-<$PTp7Yf+
zURYfTVR-bezT@KF{d?-Iazz;4zWw|7=TPG@DxEsK-qgsV@OZq}YOf_LPb}ar3DUf?
zpzq5ThCo*l5N4de_H|Wxd&$hNwqZ|o-zrL`=RM!Ryi$FG=#!O?`a9x<pVt3yc@WS4
zqrNb3MNI!aFWWx;e^wSYVk`0#>wDrw@^qe+|7U3NKYV`cMNg(@ITNqnX+Hlwyhm((
zP7}x41$`V!9t(S&R5BKZI=o<zWo4)o`opw!zg&7pP2}2F;q31%Cg}P9;jdi!pP{Yr
zL;YWtt>=ILXW;+OAhkYWQG-qWKgr_<|1*3Mx^DmekNuyl{vYY`?Ee`qc*p!_*#F@_
z!^c(MxHOoL|7UnOr$+uigQITozyAz}|1%t0F>2mW48JLL=i&r|r(F4ZHSt2{qsRXl
z6vNkFbda-FKlonoh2@S9^?h48RO7<#eeCQ#zf-0zBkOuwokrHIyuI?R{}dziX0Nnk
z-TSG>*{(wJjq%?Hh4VO~zIHvcDSxn=U#6lc*X(U*o{0F?9mfq$-uTP!WZhpA!0zu_
zx%}XL(YRN$T%Wg>ZaN%wVd-y^+vg%L2#HL-)5JCT4S&^_T?`tFy1-a-%|kX{O$(NJ
zS3_rPPPxE*E6vtI<SwXaxSX+oJwN}w{JGvC$4mU<HgB{1qd%{-G?*SJ^z=;$t(ecd
z<%?+&YWu6{)33{ZTdP;Uo{_uuv1DahLD&AEigohG|1$_Z{;>9lmEOd(TgyB3HYjZF
z5fZlLems9spZ|tuJNMc@)Z6^%_LQspRI-EboTy8=th?m4Ox%$^_Jz0qGt^2kT)I|g
zRgrh;`I{)&t?%|$`HAW(D{_9h<L3Wh*=Dz`AEzI+d3j5>y<Io{-RbgXg<owa|1%tC
zt*YyOcvkxGw)^Q9znsq2_}Kj97_<5Bvsd>EWd}9bRR0rY?=oFK`>4D?spQ3dU-zzi
zFu%=1{>R(Q%>N7we|S@y3-hb%+^c6#GXBp{le^x3QG;c+e%n3n%O9UqB>Sk87#&cx
zpZ#omZpmD;<MJt&-{yY2e5y5Ae!}W00gSF9aLk%L*Y4Dpq*v$e71Tdp<8}W`nC$bz
z_u1o{zxDIHbeXqi+Ns{3E7*2LX)vEH<!_W@*~flr!_G~~f5NWo|CwL#S6lg?$hx(Q
z|J)P%_-t*$>hFm&rUie!<?x^3xNkMD2;<{rj~|D({+xB+=cDeX(u@x($G4dY)w8Xx
z%0C{fX1y`_(Omh!4}b4}<b0Iw`1OIoe};YaQeUf<u6!w#w>2xb)LJ_G1plAS^9&wT
z_k6MY!jM_DZ|>TgYoAT8&9BV6tJ36f*sfice{1Y|<*WaA?50-S+JE?O(qq>v;`29e
zX<az^@0>XkY-bmI6J7XVe&_V^s*4|%_vF-X`r|f7QCaZLuiWQNwVmu6-#W<6bQK9;
zOYe9;-}}S9wJY2TzHJNkTYfiS!@tl&H7u7GZ~l>(U&j7k|6SV5+dB_S9RJt&>w4_t
z{Xf<Bss3l!5PJPzSJr_as{a{O+MWJ0Z14XougXyVCD4^YgiI{@_)N3&-{p%U>_V=6
zIrM483y-kppBv<lT;FhgLPh&Ozve^r-?%3KXK>uLfIogwmj=V4E)AxImMWLXv=8Ra
zh=xBc)mNs9$d~-hiUJoF+Qvu9&oBKEA9h9AqB>f0dGNax5%pS^*VP}d|Kwle{huLW
z_4>aeR~S$H!4XW$!x=w#|MTQ){m+mu|I5zn{R@y|$;HgaXR@{b_IKG(vvTdr)~A6N
zCagXGoWZ_xed2nBkNkh;vbX$acq6L*uemgCKfLcCc&1SP%GIik_IKo?(0Ucnb`wfZ
z1D{xoOxN<T<9Fq~7^0B6wl?7pd<?R`*C~DM{V1IE-F&S;`jaCqY(Hxm@AfZB+Pi9B
z;E&YeN9#FXn#&npTx-9jq_E)HuBnrRcW15IP=C1cNxIDQ=6L5H-jCSZYcg}zFOAo~
zRrd6)>Q6QQAN+;yjBH;pFNa1l-f&y5`Q!VNTiy>!uNNs^`Et8s)~%9<|I!&&*!R6(
zb-nzNzj#KR(5ByhTUQ*-;W*{ymooY9Rhjuu<Lzr)uKn@;D1W$oo`Scg$$QSsR4LZk
zZM!DUP3C>)_V{w+iIXObU)<YDIZNxgUi_N!fOFlO#2q#Dl@|>E`eudOKk)ylzE292
z57z(dih7~JJpbeUKa=kZ{b#tL_4+@9t7$@w(0_(W&I{{rAOF`sY5Sk@{|qYMkL>@(
zwd+5_Mc)W{mi<pAyomqy@jt`jCky!gb!jYMA{XyzK4Sl=L{9iW!-kOgzj&`!{jFs0
zsDJXpM*TlS!b<xu(yJK0!UiTZFG5<6pcW)Kn9&BDO}hAZ)os-UXLZr@Ki>cIeBW9B
zMzr)l!=hQcxSpeV)<FWrx9k4x;BSim<WZvkyW&5?#ZTe&@I*^=l0-_ba*mJWe_HW#
zf<oT^FWalVb(rA+b|@&nyMwbsnELPL*P#^UNru(Q@A{X9fJp{j6zBZ5_|I_CcK?5d
z4QuBA($c#3G5p|to*$DQu3!6jvgei0J7Oc|?#vJKYx&(HUv=>Bocbs8?*C`F5$69_
zRJ7_ZL&c5D`wV^<cT2za7mZMN+3_gBUP#*hgWAf(A8%%*tj>RQpDXK@&Ci1gv#0$2
zbKB2u=KA<$-aNraF8WG&O7j?5HSu4OVtBmee77Cj)v}Hn*KA?=hr7;5{E1=wbv-<D
zMPwG|zEhv>J+*q*7FzgA>e*NOHaX6#+ojj@-dJrOw%wWW&xNk1djA>H?{l2^m)m=7
zb)v%WWzr1?>hF8`zjlz@=vwig;g9Bdp@`zef4r_K1YWW<pZZtj_)G8q3>@Xhr1gJ%
z{P=XrgOcAqk>{UFgGWUizs2{J?f>+jVPRbI->H)S8IFtfzvb^M_|I@g+G+kjw#D{8
z1t1Fl?)`Yq@IQmnv{tqHm#aShXP9_Y{`P+cnfuEB1ZBDG>rLx_DnJHg{x1D^F3*0$
z7Y*z5s`<zNW-dPY<9X?S27N2hWwlct|7VcRf{yP9|2SLypFw_}#pQ(HTlW+v{&d-Y
zZt_1-`R?`q8J_5VG!8AbO=__S{?9Ox;jaeswRL%K*F65Lw@#_a{=L^j_h;GDul&=f
zQ1?84tHv|wjMduRGtWP1xUQQ2+pZ$z<nyekdFxkr-#KZu!hX{8uj>~3{P2ActIa0Q
zck9&QAM3XJ&p&ulgz<E2P4%UBX=eMRKKxs|Hs(JA*OF+Ff6=r5Gsv#s*)M66{Ar0<
zoal!7$9ZYm{~7uX?3=>Z-HwWmbWqr`y^CY|l0t=0`G%D%n9uC7{owC-`Mv#?t*>YI
z3%p8smznQrIPL2V&+~scX6^jAQp~>j)-9!0SFwkoZVYlYHC$h$9@v@vXE>>Nz5ea}
z{|xTO9i;!U{AZX{{c!zn)~NpsFP5%r+WDV>ss5zxhwFcv*4BRsy6)&8^Pi#bKf{xG
zKc@dX7#{yib2amW>3=54a{s$$|D|T?eggIXSKhl<#KeN{#hg&y-efi{T-iF*&GYe%
zkgM&F?SBf^IR0lyxIG_Kb+-R!;BG!(|Hbg&e}=jx{}~=-y=_0TpEavk$84X<1|xH>
zO`cDm{NlawY|s7&<xP7`AL%pSjmws@-TL&@D<QW{UYv?fZVs<s`9F`nxBf%?fnRH9
zAC{Z2&1hoFysI}O`PUgY|L9_Pe~^WV&+&z;ZbkZ`v-x2kruVO}lg=*JE##^0?dkD9
z-FlDd`JzoVH{Q7KT%T~=#&t<W*73Ky&MoAfyRSgX`Lw(%^EA5yQVf?D?fE3%C&w_W
zQ2g8JWq!<i*DS2J-BV|J?UkL{#getmzsueSbe8PV=3a89%UM9AcfoU&zA21rU27gc
zSTFk{-RsBxM`!cCheT|7{op@C`kbSRLTRqb4LfC+uY_N44&CtYgm2`!NB#a8E{`5L
zZ*)mXF;th3P+rRrvuMw!`rdyGO%IO#-SjEp!Je{BQ~!MX{3ASmVMVvcN3;3g-b($`
zK3(%xS5jf+mxO|l-vS!UL4D0?U7z0fSKWQr|KW{$s`8Qichjzg2RA8eopG!E*7!xO
z!OXP2M~>}c-n?72xoUIyBy*E99qiw0-2Qd-JztIIhv{AGShr1G<MQ2o>7G>=t!tSX
zcYipW^vCW;@k42Q_RMnmn`Tz6N3X6a_AV{vIukq7(c%styTySoU)R+?T7Rh9vSiA&
zKZ;9hr$=s`DSdK7?7Xb!l3W%%%h{D>nBO+=r!LxLqg%Y<UPZLe{P67+refRuw;X%D
zrIX>XOxa1vKiSGh;)QDzA7*dceC2D$wYPO&1QvU}{`c)d+5F@#hSw|B#UHL0tkHaQ
zx;b}A)<^H+rkYOC#S{7y4&?nYj{5a%_p*w7@5#Ye-?eU?lN!2I<7|^3I~V6`2Pu=#
zYVOy${{GwcZ=Qa3>(0*9E(eY~Jw|-|w)|D+uIve4lqq$$ufM)#!z=M8+!;Q3HD~7V
zIVcpra%IT581ZX;S4pgJ$mK0}-F`S$uGl|4Q?PgTnVi;R4hltw*I8Yw`OhF|qtD*?
z@Wb<Oi@aBz)@X6&GH$JROE7DGJEx1`mBza2Bl|2q+;0)J-}cpG?UKSNEli&bzTR3e
zr~1p9y~2;qH*NiWWz*(s6Z2B6gRDF5=|11U+Q9NV-GuShMb|>B>qqyqyf8nXTE15K
z&atvx(MLYLu|258aeiB|gX~t(-iP-^U&Lu|$(}15*!9Uo!6Ih$vZ}lwM){7lvlz5b
z*f{@ZP<MQ2|C`nGKf{8%;p?$BrH_47?<(o%jEuhbI$Z76t?I_SO>cCVr>03BP~p*h
z)?k*gsw!{o@@%v2(q-|{_oC-ba#Se%@_6C5)l(S#x-=HBd22axpH<H7Gu1i!f?aLL
z{U6cuSpG9C?Bf6ZRsBDMaID+L{|pM={~3OL12x}GAI@jqqxkSYgJ4B@@cf8NYu~GO
zy-_k*FlqG$nE>`xH*0)8#Qs?=BB#8O?+&B(jL-iWY8hjLHtk|}<5^?GVaXECyRt_9
zgWKY)3%#PcF+Doh&Yd__>uzD+VS2cNIq0el<43jp_FqeH$Gp5eDQ8Jtm&cu@wah$C
z_IrOkVZQTXmDI8A%QD1xYgfk|&dgaZ{WK@#7vq)riDwH}e%yS(-+ubvjS4J}E4NP3
z|JOW=;fs{EUcFGpw|jEAb>2VvrmT>;b9{R2lE?OXo=+a_(RccIzU{4@RrWr``N1KV
z*5!IR7e2M*xzXLjdgAxmD`gA#7ip{t+qJdz*+jXuyjSWICMg=7`_G{C=2*GxjRVKm
z7G95+vf<1wxG*>6RLFxZVV{itRTtEAF}z=~W-jxQU;14-x8A#6QQF<<C!go6_@5!u
zgQZSs>k6i6YggaMh>E#2ZF#}AZv}5Zt0s8FYzRES@>L<|Wz^bf+m|`7czK<>Fw;WK
zGDJXv$?^61fb9Ms=l?UP%#Qix_@80%<CS#+{~1ndUD*G6&ir4dFPOJ4>RP!ntIW!_
zXZo(*J7(K9_Rorp(CYuUe=9@uq8+S1u0Al^`Kf+e1kd9$W}Dvr`F1@&xi)j<&7+mc
z#iD1oEH3gEZVQt;62RJ}Eqz2z=fWQCyvf;7Q_Q;lGZaSay#Er)GXFrhdHToM2c}Qg
zX1G?fEV<#bqmurkqXqUd0qoVTrm`x%DZNuCHfWtH;;=nhv`_KJ)Q9#wv9oRc!(4a8
z?`840wCO*?-Yv5Gox2!bn8t>kcDbx}rBu3g`<4b~l{K8)lNer}57NAqw={kA%L`dz
z!kur^V=tayWNcq=bA4vT`(yWHKjt5rFHoQKsxZc5qFbC2m&f9>eq|fj<^F1~_!0S^
zLFKr^e}?t>e`j1^k9RGar}$$sF8jAX+%Ns3{OH1C-7Ak5swC@kvZ@5W-r8XCT_%9t
z(KRJoPW|E*?$z9}L0_WJuJB0O8q{+yIn~XAU1rnQ<53H1Ke8XTs_$6)g|pP8HmGtM
zYm-X)tv3#g9TIW=PQ?zgrXqn3!Uw<mUt79j!SCg-gDx<-iuOM87kMcsv}ya^YwLWs
zZu#6@Iyu>ZFLXva!vurnvlwoo^-HfVU;0`qZF`np$?o6TC-^@e)F`m+vy#s`gVsY~
zK<%RJ{`J6)v;L&wh5c_I|7UPN8T*$(RP^YPuxXp_oO`I)$}`FEpD44+zfj%OiuwOs
zZPWklN&nAqftR6p{)Zhl%>NmjW#qrV^8cr{F5-{x2fxKeNAn*=^S(XQ8Gq{Je}=}!
zxX=&U2j|IrlyA|0x3yR4)~Aa1w@fP?RrIH|%g?l*TP3&pL-K)Jb7vpgWpwj|n^<*t
zN{Zc%bNr8Z8Om23oMJS=QIe&6>3Yr|>8oGeWB<`x`r&cJuI$-nCpS*q-}3g}?&6D)
z#mCcR%Fl1js#@FDov8Y4c_r`CHT6}Ib2Q}c$7eC@bgg{#C;tO;%uoB<K5PXlW}907
ze7jx>PR6d#WL#2o<l)rzjUtRmU8k=9@m=&I?nGR6RF7G~<W?i0$n%R<oJ{^Adg{iX
z%nzb}FWp|YO)<?PY#yhw^M3{=?ZzLE8mxb5?LGT<?Y!A$3vEtqn{epzp2g1?8SX^)
zEtdHhYN9WYS*LbcJn-VXqgNLjIx4uuxd*l}Twt3k+W$yj<i$SW&0X(f_K8jKR-e+B
zoPIiCPR)XO4VSmx7kcqe^3wI7Yx=BfXIm|pu-Gi3$>C5_wG_j(MLYghKk9GZW2sZ{
zO6m~jUC|?fZkG&#jJR3Z^BmdP*=OxuQt|oFuV=l*t0P{Ey-m$BE>vkbmiXn4G)wyS
z^;rxrUaYt8ejMMRwpZ+M%q`DbD%F{xoc|dr)-&Jv82cvZxQ(Zd-zK+LlCpfx%qJAS
zi!l7sp88S$NIqAE*W)+0_N|pEk2rtxaoEHI-?{ki9~V_`t5c|mKl;hU%51gFcP?`i
zzO^zlzH%K9vOSi;ylm0NKaP*)iXXb*Xt%XSS9!<eQ=L6Iw`WTpKgFB$qxsSD<~_zM
zOxqT(UVrPtBael>OEqdGRw&5lJZrGl(iT5FPxr!}=(O)qa*Ji}6i!fK5G_$(_B4Ux
z{p~s7?SIlfE^ptuw)(K<@+@uZ%;}qIH*n0YSwG7@!1#OgvZ?^~iy>j^?KZIsE4E2K
zyYT+nCkqLkC2lXbOq}?=VGVEeWB$f1)z`P|xOOda!o#C`{567K?sSx&xPb3f*TE0>
zdu^;Ay}0|uHb{F}PnySvzzbDV6ZmXDcHIN_*ZDC9VB#>kqmaQ^?b*|h-;>z(QTKfN
zy000!rP@V}i+9*X<erkqP5v}bqHxZ2H;x1jh2|5=dl`)Cci3%>TjT$o{o3ja@L|i)
zQeCg}?w6Ne(SI%w<*<9ngeIr{!?U)=)E~9~sV4QG;cs>Qn?ns&vhtCazy39|b@$#G
zJ=>F$Q>%$l;lWpiucCE(K0j#xGik5!zg_<S85T!vTl{0<L(US<ty8Z(6chA*vSm%*
zl6{P~M0(E@6dy2QymB$dZ{o-7gHhKO%J#0?Aoj?OTk5><F`0P{#ysh_O>YM<M$UTP
zVX~e#g8OaK$K2dmjuI26Ox)4r@Ic-<=G*3v@dvA9Ki`aft?^xO?z#zXpIYo%Rw&!u
zJX`ReL1ItfqKf4YYr?Jc7K`4=TyaE3&si<VCoj0kUeH1ABFo1+w~yQC?$ZpgGI+;v
z!c;ZAqW+)jUj2W29{p#yz*;=({Ly;W7v<+u%hxF1aVy&ueeBX3sNoD{Q^M@q{{%0p
zSaxw;NvxapjtdPBi`LA4uX#|uV}-)U_3i6yy#C4hy1K1tS-GuZE}z56gQ*N3&$CZF
zv4Hni*Rx03-jVCl3nm<D^OX3{z|}aZUaLFk%9<~|R<+InJG`qsgljhKca^{X^4_Kg
z*SF{k{|Ni`>)zXCm!dS2XC(?RKPB@1n1edAO+)1Z?pbR4m;RXS|GREt+4ivAXX=tx
zG8Y_kSCD(OW%Z;V<?L;`d-Q@2h3nhRe7;lf_vZb~1wR|?ru_J)QsLyw7n@nSqwb){
zK5wrRuY6{(XRVZ;@u2yD3FFnkn6Unj=?AOsEtKtDwn6Ms*llo}d}G*ftafwkS_YAD
z@il+KKg6}yd<(BSWqTw&dx_wl-TVu|?Zq(vBkS3Fm1M(z=Lx5q?DN{;u3~4rb&B3o
zi?2)#R=beK_PDm$z4#UC1tJ5OAR|U;uYbL5Z`%_2wKgcM>l9Oydx%bWaSJ>1<kMUX
zmlkdM5-*goPigz-Eww?{z8X$aoFEqeefo+&u2})>6Y9>{fAY`&&+tZD{a;ttUiM?N
z&Yo}IBf09;w`}2<TMj8bUYtShD_H8SKEJ<t{GaFB{|tXyQ~ont3=B{H=>5;Ly<`9D
zv-_7aWK@_(@3_84bm?pJLmJO_Z5KNIv}px@P0AepV>_+$<zCzAf9%ej$J2CiOSIbU
z`qi;+=X&nn?O>C%o42<@X1?mqk9B)j*XV7$v}LZ{R~yTvhee`}K8XmMu0G?+{D%ym
zudg`A9=~vpq*$|j%a->)+$Nb`ic`4o=2h{P?X@@06`w11kiHV$o85k->iBW(^Zc2!
z-rk#^YBr;{;Nl6-E`yNGZaWX{w1~aykQ}<le_>pW&Xp~)#qJ-)HkRfVuQ;ArnOR@_
zY|X-hmM_=}*50;M=wkTxO8bmW{o2=6+j~paGgegn{qj=pp?UcAi+_JSl6>eNP#1i8
zUibv#$bBjw*w#K+%lG(5oY=>>?W#|kq&pw%-}!mb4p+&Xbqp^jpQ|dfxxl{RVx0M7
zdC_}!es~|8FHrKkdv<ojy2VzlYl1eoyIB+*kTKwCb*tYSZfBAewdX>{E&2BK%eU{@
zB`&^Jb<3Kb<Tj-nr{?v}vpukYKVqHsv46T(Y%Cwib4EnE=UsU9t6|#?qm?RZ0W~|6
z*`L`KN5rXrT=V{e+Wn}_vs|x~T4&@g%9zpqc|O-~kLSw#{Y%q++&Yx2AM_(QxwugE
z=n<)&Op{AH%O)P|E*7e^v|PZSvS`ozWB-(QT&t;-Z(94uVHewc)(tza<hPs_SwAPY
zQ(|Is)AEnDZT$9Yz1a`PZ+iVt;^UrMdzTbz#lI}C>WYy}PHsBE?qoOj^PCg`{)#pA
zqOT^us1v;UH9E9+oz7d8d4>wA-ddB47-b4$_<K#yMj!cScBw}2;ak60t9#F`zrB9_
zf{cff<)M|Yj2Qki>}$_?u7B)51Ai$$*AMlueHU!J*W7W@otvNYmGj8Ov%)-<GQ4XT
zb?0X>e7Vn-ZEmOVA#d{Pdpg;L<$0S<D6wd$Ul6LFn|Hm}zFWUW`-A*pcQ1xlyIKk(
zZwWEpcsqYKdvk-uccm?tADS)CPtt$*ao?UAkJWo(Y|p)r^4sktIBDi%mVdG<n6Io@
zFYzP)vGs5M);Nuia@$S5mz!1fJW<)SIdPl&HUm%V@Zx0I&7mf}Qs<9WxmZ2#s#CcW
z{c3xMx_7m$pM~-k9-~#lS9sEwFOSP$zO%aF$8FHGOzeM#?e%}<WkY^MKI(q`dT;ij
zU+b2Ott?&QvCnUzblYzq-S3_Xb0!}1tA0F*b9KFR{gVl=_kVj2>K`oN{f{;<6!V{<
z(f-N2AKCv7hVB2QnY~~EAL?|-)kc&lkNaq?P1MF_gY~b&ALsw+*r)KHVMFfue_dI3
z*4Rt_XE@RQi2pa+o&O9kmM!47`#abE$=ta58-D(O#YL;$RvdrxbLEZ5KX$wOUj4eo
z$+uLQby{d}zn)!zZ<X5i$LG0!)E#{H{_AQ3k<_HF&`SB6Au{uyzIKoce6)f6t^EQC
zyPti+YIgne|CEbf|FQews~_!;Sf_XFaa<8OpYO&Y@i)bW0e+k()t??rGJeG%di;;U
z*8LyfAGLLVB>Hx<b24Yj7TyaRW4jh#&q`On($yZ)bpLq$7SU_XkE&OSdvA2o&FNVx
z=Ek)=<9OMfvlCA;i!fgJ@jU#8kB#<<$afc(?D%cHcGJrbi>*#JZ_jqKiz~dtA60(f
zws&svL*t07Z6D)quKm)Xu*2bH<JJQQAHNr>+q(AAUFidLtRLDIm;ar(G|KPVCHI_~
zsm3;~9sw;+H&3#Bd*xdFn-7jZa`m?@)c@Bsh4J(c{=-(H?~kpQczHd_w&z>e!9U%V
zx>pzX3u*ie>EYA9@K5E3*zcqNWH($i+5NiK>d?J_%Wfg98jHCW@EKS>Ue0`J$9aaB
zgSj8SA3IdJxMRoUyl>pQzjt3Y*)RLm^V!uJ!-wDZRohAjOfS82;s(#|t400=3nur?
z>34LH_1Rpb`r+Q(zjKW%a+7+?_M|4ya&G(+!P@!jbBy_c+pEQXOn!7?i_`1HUTxkL
zx0Rm=J+!d-Ao1AdbKJ!258ofYZ#TJE^2=<R?n2+UrBy78<?reIlz-m!d42P))_E&G
zHXpsdtAab-rpSgt@BZYoF#^^+$z2TJPaXRh-e&S%{72nm_bXR>gdVJ3c-ZanV$HAe
zkF?YFtbW|K_>nKa+oi8-*}gry#~t0Yy_-A1Aa}QSb*jgIhQ{g(?3Vn8^E>}>oT_j?
zaxwPCN>$yZ3HyKY9(4T55`6sVeVGcoXBsu74}GGIbAS8pdZYg0W=irKj`fir_IH=m
zi@!9Ce;HnQa;M`Y*~4dAcdynDaDRPkL!ItxE4{fNzDI1ks@8D2H0R^(=#8xRg6y2c
z*Pps!`L>H8UcBPfz55Ss-*;`XZd5wWAhs!aYI(v;<*S}Ha<60dAH3gQW&cNM+dt)N
zIt#Cq`g)h#emB8OImpjI+GgI8ug^tWAD-uWRW<+3`T8`a3FmEkt24zcQ^Laz$iL0W
zsd`d%J@teC(QQ)K8*@q)TInrau0C0JXO3Zj-!<kP$Lw097@pS||KRt#RATb6y?yQD
zOJ8}dGHy9%EHXG1p^?MK%%*+f?VRUbn?H)j<O|iE7kJIbFzu!8JO``%d(6qV3ry1=
zpYK|;)<#~vZQ<0ktEQ65@k+)!FNz*cX}qlJz`W0YX~@U*?eDh4-`u2Y6k?yTrS0zX
zz;oA(G!uV_*-vGtQa}8k;iq~WXxc1w|G)02&-I=2KgHCr{%43){?Bmn>b*ZkSJr%$
zZ@a~P>)a#`okdMCa)B3}HrTZ*?OA@TPI<%iefB?sWUW%>Npp83R0cftnV0sA;fn>U
z`*jn>?8l$umUr98+$x^3kMBQ2dg9fT`*N3mKH7bk`QiH38o_0Hi>_&xuZTRBr@DW>
zuCi6nw3rWjKlWc=TO==8|72;6?SF>#SN}6yycoVQtE~Q(ROr)*xr-jLciYXL(*N2)
zPVz(h;j(-7S54PTSIp!1$~o)BQ6A&oi>nh1fBLg9oMN!yKP`K|f1gOkJ?#s#bj^+(
z_q&>`m9Vo<QR0=+`YlC<^A{d1{LjF!bw^3d)j!fJH{E(Q;og>&BFkCNF<JEoaWVJT
zXMNG{xM#KPgMQo6z4uvWbx+tfZIii?S)~QLwZlpE5APjh<{zoej``qzaJ9Ae?5EQs
z=chK`J#^f<;K&YjcGVD`ucy;i{Lsp8nJ4?<?%KU)|8<wvebkOUUo0_k=82yTclbo)
z806+{sR`V2%_jPTv;VdAIhSOe*u8%~XpvFLktngW(blw^VY>0beC`_akL7JTTe*F?
z{iY=gb<a3fepJma;jm5d{1wj{Z2mI@U9-`DsNZ!<Zu^5?x4UcdmwYni_D(8Onif#;
zh9jr(g2?(MtB;k=u5X`Z-}hxc_sf0!o34~<uVkN<I9WdR`RdXq3}@UWnD6bK)xh<k
zzcXCw?egRI`TZZR*3d64z0^OE<q1p8?1r1F(-*z7b@xBC_a6Fu9)tQH6?>Wg3@>~x
z{AYOo=|96K*Mj+CrSYBO_e8gKACr^6T=Lf@Zn3-Wn#LKYH-1<yv|rj=Zf?8jm%GJr
zW*<V^`?jo(H4mCsv39|(=*dq6Bu|9J?o@bC^PgcULrwaUSF@H)u9^H&e0tb+mxi@D
zZ`RcAc>QJl(tlE~_ls;5<hcGNL*CEg?A7PLOn!XjV!Zof#rd`6cCsJL+xBQQWc+g7
zBh=VzDgD$wJZJu4i*;=cpb1w-?}+~l6I4N6G|({YtWWtj&;N6cjsLgb{a=IjwbK5d
zO!5C2u5X$Di*vo)-}$E8+V(tICrc~ynN1%kcx$axPWWoV`1b0<dLajS`8`re{~4m{
zPVbkSpHcsE*4NzdWAmgg=G=SrJ8F|l;DpsiySHyzaZ>$docn{;JwK*?tg4qho%1o%
zs{VAIOsQf)_tz&ok2`-AVZ8O@{sH|*>&0J2ty{e!TD^VCkN*tSncAXvZmP`r!tkfz
z^Mz1*;aB@P7hG7k=D8Jjqp+5HvXD*V#$%V7pIW}3_&(-%cT1e!kI(FFroVS8UCf<W
zv%=<3x3)y{*RMK-^L9wcT(4gJB97^US!>~n(~Do0?F-3$YJ6m^ww(n>v`)nD0CwFE
zVjQLM?N#&UuW**VI{)^~n$9U!0)c!66~`)nUGs1MW4`eGZ^w#euh||OC;PWu*iyEZ
zX~u;|a!!w#SN=Jh^Y7e}`a{yI`2I88V*k(Jx-|5Hp5nH@OaHi~PgUa22=(3&Gk3!r
zp5M$4vg`Z*h*v&}XFvS#E#I@-KHDM#c8O}Wsj_o0^|6)BKeXc5eC|D(OP9?qR&U=d
z#4^Q8d0M#g%EZ$`wXf&@_`c@u!Q~lIG2*%T%U>DW-&)r5A+#(($f|$Nobm(fw)^}j
zejqFC?cKOE^K$f(#ItuFMmqD<J~}(~`(vgCoBIz<?@jy9AZ7aX!^f11X<H{Q^buuY
z?`L5?A*tK*g>CP@J<<=)Enfa=zP#z{y&4J|sx8HMySI9+>?~qmczfd8xX%tU^2h3b
zs@!^hiT|&>toMiZf1Y8j{~2PQ{AakhKCmYJKf@&7b+ror87_XjV$ZoX*X;howoZ{>
zt!sjGit^lNhV<XF5AbEE60L~uRhs^Y|JZ4J_VXs4OEOQW%oW_Y;|<^OS4Au5-MPN$
zKSN%o{1=f;{}~Rn{AZB**jx3@o~5RK`$yiF$0n{m#IQqV1uK*AydSAyc4r;rma8|^
zsOnAsJLjI<<<@O?7wB+FRJZ-U`Tbn>nSD}Q&o}MU$&O<+-m|Nu;cTAF^Xy!{<*x!B
z{F*BI=2MngVCM6H>Jwro)Mw=JG@rixdiOHzqcz-yAJ-qZO+H%IasTGDi8CHg>0z^G
z$^O}J)<ITp@dIwpf6B2xwjaIztLBzRkLud3r*l0{F!0{q7i9PG_{y`z_l3Xax0TBY
z?Af+4S1mR`<G14FM?t4<${%ZHES&S?-;Oo!`Cfjr7qru_=&Xq}KBtp1QRj_Z(CV!*
z>lj#MG?*1i)zx;@cW#s8`Ox0*RYJGquF>Vd-Q6d*MJ#kQPU)HR_YLEmdGqHoSRbxa
z-g)6aL%N6Qgs1Cr-#qb%+<EI>w!8cTyZBS9{z!jNn_Bp=<fGokbg_jiUK!amzMJB?
z(OK|7&YvSqp0Cf!$zOc-k^RkuUAe2K&-2clWg;6n>ERrW@2Z7AuC*Wd&meN+RUFs$
zi&81R-hO)h%reG`Ne3I=SswhNd?$d-|5$(fFBdzJ`A4jLr<j?SwYogI$&zi7bI?P}
z;)IIKuk`g2`;#9@?$Q5kbt`MyadFKQ<(6O8`!3Y;JpZtApJm0O<GvqUk9DcLT`^_q
z+ORrxA~Tzgd*U(nb+x^LQaZo>GrYN;pPtqFPV8h_uSm*9PjdnGA_wJ<3wY|=KG&FE
z+G8vFZTE_o+oPU7OWj$0@!6J?$)9=;Jb1kFm8a?C$A7&KS?O(E@?-shU9FR{3}P4y
zvok*}UdMTN3j>3NR^j!6b&DUSH9wFS{o!=^>?Iwwt7|Wx-tGRMA##VDyyO$X=J_k$
z236d;{P216yEQsWA6n-vT@`mqQ0HOoI>raW*CkaLtUteWken}<pE`AqaQ*Vh&#h0J
z?mTYYKPmj{(eimBKO!H#=R2HRT$nNa)**vg(XX3gdv=~oJ<mLE?qAP!#*bqJZ@;?7
z*rajI;_=Rskhm^p2L{o%Q$TB97*A9j5B|}dSemns_rjOIZg(fly!Z0Zl$;N2sebp4
z?OgxEqt5a_!-bg(|1;cw`k&!R*Vi3(FMnOx0^YkY;ZN~@hK0t?{~7k5{?Bky^iWOY
z)jE~<im+Sy5*78nm%n_h>Xq5Hi7i?0_@Oyoe{yxI%4DuzE<Iau`(T)y^`+b`cfG1R
z)-Ht9KpkO-8Yq7GEAwBew_;viFBjXc{O)AFqT8S2rf;qCJN9Y6l4HGgIposOa}#dm
z9+>p*pS@D3zvia!g>}Y%)a@4SU3_>S<A?pNO>@=Vw|AxfXPDkDm$Lu+?Ueq*U6aw4
zuAz3;-oI`@UCCGeg&{1w^FPCr;A{1-kJbNWxV9E;*uS#<&HeujN_xxxGbEaU25YWw
z3h%5tFEzDyev_%jyY;yXidSsp)>*X#(mzYv{CGW2hM9hO{o0#a8Yf(4#@@dfA~V@R
z&iC;F_P3f3o!Rdj-j+1{&k(C}{-3pb&%^1D%#T~gbHw;6y||Ucsn#nk-FM(k-SVPk
z)x}-uhgan9Q~DFL_v72Mn-4FGe5&jGMR$9W##cqp<i3{vsIt9f`xyQ%njCYbvES?J
zL+<Aj52cFo)%7iy?~uWKb=i;Fhn!m$FWcU3axr&%26x`7!*?vC8^52+KK)PJ_Iy*F
zPWC>=>h{_G32EE->R#uD2Y=PL^Lxem?CC)poBgbMwoiMRB7DZ2&GG5muXittKC<Uu
znlD{bdfg^ItIy<g#+gDdH)h#ICV#F*?p?veEA7|)X!)_TMbD$y9(!K&c4ghDo}62>
zisOrl@%-T9USIizYRo>I+rJ<t^Yr3~s8h*G_bPO+nYDiWG$Dk!=dtp_^@bm0|H!fa
zxcaD;KYU{D;ww+}b4`|KZ9f-hIQh8N^N$kiQ$Lz_ys^<;5Sw-V^KqGiV_w>3JnQ=e
z-HUt~3U{TqeeLJ05xc%6H|ZDm@up+%PgHmdY?SEaDrcy$-p{0^-w^-Fd+mRQ*RS%Q
z2WQwRUi(x2p*i}>R%s61WxKcT>bhpZoaFN9RP#yO=z>4m_TOr*tktuV+uBpSaM!k7
z6HbfOrSdlM+48U7xPZ%kOaGtFs_?%p?*F>0wtxKm&ogxW-#W+t3=3?F=Wj{x)X~!|
zxyy8`WY#Omvquj&&)0scwd!8WlR4GZT`&IG)t}Vb`k!I_3HvYX3wX~n)Yx7ydCwVf
zwtU&G`>(_|sC*9(FXW89dBspNp@6NhKWAR{X<ZxMC3osB?3&)TXPd8(P3+0Eo>2Mk
zUJp6s4@v&2DvbNj@NoT4(USUKAKU*kIEX&^75`^$+V=kpao+zK7AY>^oBk-hsmAO+
zpJ}J!#T?zGo3~~@n0D`Oa*F0TMuvjICfnD_2jkRsT&Pc1xz=4d`R46&8>T;h_Bm3S
z{f5x~hIRKJ+5efGFZiF~hF$(&mh1C=Si8<XV!zpBR_v~6^D^>yyzD!#rIpHe2G~n6
z9R3ks{NdAicdP4NTYc@`A2GkQXS4LjXD0E62N;@+3U|!g^JwKC-@PAP*UYN#{KLEQ
zMs)VH=~_|JiW^hn>T(w_PN=Y3*jF4~&%VW;v10L|!l?L{Vn(mG^}H;a^u*(Fq1KJ_
z$}Idz+WyC{+4Jt<&d*x=P+R-2)2o1qkuJAWwEw>Qlg_+c<pNvy@m}B3{PsApt4Y#g
z??QG2{=IuKPyXCq>BI88f2<c~Ot^g~Gi>_yb&c!Zty>tGGRyh2P=uWEJneJ;>~FiL
zfB1d4>|)f~C53(~w!JD+Vp9Ipd|%d{>wDsb0QQz2#eUm9EPtz6z2v3lw`%UyPG_0^
zUOkiK!}xMX;mbLp^}YAxE^c{Sd`LHSY2@wMr*GO_N@@&0_jA$j8}jD&Y-@cp|1+Gu
z`k&$U^nVwY{i!SM4*TG<$spp=C#N%h4AKi<JIH?T_+!jB_knKR`9rf(_x9dBySUZc
zVw$mXuCly_yqOXEwLRSr^*ilkGo#qQrFX1Zt6MsC>XFO$qw<A#<}e(Q-66puQ)K)4
zj*aIdk1zV%@vqA+@VZ1EQ_V=6!gPA#<avdSlYfMpFLr(~zgvnY>gc)dMAIV;YhE0v
z{IJ2|*0~+|g>N#Ltvx=lwVEH{_Lr<MkJ{LD%Jg<&ddg~*A|Vc&)rm$Nk7Y#9R`u50
zeN@YI`r-bg`F#4#`Dvc-)k5bT@rqBH9=PSZ)`59^{*vc?YL&qY9_0i-YCVqK^5gfz
zmw!xCS>LVO75GVEPDB6X0%?^^Ut*81c=;o2KSobSA9ZEt*Ix`D|1)6JRQ3-Qe?0%s
z;FQ+>pP_#Ge}+lgH#8RTciAb$WN)vtzOwwI?i8`8-IZ}Q0s?`CLaYaQ?uKW+s`@8d
zeqf*K$G5$YqAG%Fot`kCU8Fcm!S>1%jqi_N$Gd06bNn%0S8?q7ky%HI@0~A7R=WG-
zwos0%<Dm!hchB>)UA<|;<(!h$u3Im^d9Y=B{B9A!mcSoe42M7LecqLCQmEdw<+L{U
zK7(zSr>no()A~npKi93*!ha8bUG)W28-sDU{73(Po^AX8GsNBhZLsdpj{gi@^`Cr9
zL2F0;Gu&)tNUIU}&oC)`S^ag-{|sNU!)O2D`A|0Zk^J^oQAe~Z^HVjvt0UW9wDcIA
z3;E#3)K|Y?y~hv5{|qYYef~47oBW^Q#kF<C-%9>7Ow5jiboei@GI)R7eJDysjx9Ip
zvhTJnd$!Ip*c5$a<FqhquQ`0~3cr)qRh&M${Z{&cxz<5_X|c|Mo42)}V}Hi0lCYck
zb;F(CSN}8g*?;oR|IhGxi~TQyF!r93$gll3Z@ZeEi1XIg_TJQGskJdT|767cLk;G3
zl7Fl&Y+28q`MrEr>e_n?tC{Zj>K^e{jD4&quJC}t;ql6K(vRminw%9obUps!+ev5D
z=0x9(K4f<6NYkl@?#%Bd6uz>3efgh6#j3+c_HkWq5ni}w%XSaFgu9;?YjDr0l1Y)T
zPmbY#%l@ca>{8Aj^L#7siage%hZd)w3Jqe&ck)h`yZ5NU+)m}ktmlX8*|$7QRdy|{
z=Mh`2G{;D@U&no)xcmpL^B;2VbJ^HFyd7QL{Z{JL&qraq<~=i1HlFyIafghC4TG(1
zR(#hlonPw@uGM>XmwVs5Y2W!6PJa5ccTGL-fxnDv_1P-oA71aaF->0)l`*Si(u^rF
zZpOPb4L`F<KA!Wp>H<6KimZQ<AC@1r6^{H6w=nOT`S~YVV$+Vian)gbyg_jjd(zaq
ze_}q^9$T5q-&G=T@%61Z6*m37TAJtO*6ej(apHN`xvLL8uuXovR{OA3c}vb$UbknC
z&rFPx-|YFCx2J(y@EF7Qwffz0#=ADQ@pEQKFTGxx<KA)pEKg4153wiwj+2jzFdnt<
zD9!Kv{>#68?W1a0t9zz9t}cn|dEDHe|E~GYlfQSaZ+i8scHQI$vTlctiMCz4?>o!l
zaa<Qm+MeW)59^kGi2CmO(Qc~F^l7`r?uYt(I8*gk<Ua#nm8j(FxZ^8cU8{P2_^fmA
z<!3^-?!4S=lVfYM?ZGdB3gwL=jK}|gmSAT6XZZSC|5MZ>JJt$wzmF{Cx8`l+y62c~
z;GGRly^Q5r73z%a_7D5+KT!WOIs8Av`kVEWuI0aZ`JX{)+4}zs@rVC2I7BhjALr+M
zQTC7H-?hz`bJyzUruT+<SG1%~?c<tLD$DWZXxzu`-RrmPZ`@V3ROZa~Uvrmku`94Q
z$W-QeA|J!{{Jv@N<N1Fk{XYNeJ^#NpZSG^YbdPQPZIj`f(|YgNsdH~u^-NmLcg?<`
zsIIBOiofkY1K$=|<wIh^cVAB1Ue)@9+vanO%O-)C^o$c03^Ik+MW5Z5u8~~+qd54o
zsjd?1maY3Itoe44cVnlxM8aM+XF2xeJJ-kj$b7Wd{zx6yWgm0Hzw55uRovOK;a*Rd
z$Rt}nBmTXW{>Sr0Z@iY;H+6C5<)w#ioQQiCaqfwb8G}!s5rgHg-%<>}l#o}+?>Y85
z{>P2!#k%WTOgwe3xd~c(?Myk(@}FS|-;K9%b0M1+KJgP-Jj(xPXlZ{u|1ak(`M>|-
z|9PxSex$bfW7PWtv!=+YemFNZb&2`ZZ=DY&dM{IT+QF3e>bAS{^^fZpz7{_^PkP(O
z^0swfw0ACTyZ7Sw-bcqCh+h41LV50vKN-vqe`n8p^G>Kb@<~&QWu#Kkn&mh8T&w3F
zUN2e`x98I|S=+6#7LUK*TJ$qHf5nss+yB{qSs(G?`C)#Jisa-QFFu?s*#GHs>F=Ay
zueb}A|Gl8SegVG%>Z<WFdy)N5W>j?l+jIL@xGdW8^RGJ>bUhPqtC$oqCvoBxqYdBL
z*D@6TXW)7BcG(Z#g||KPr5$ajsrIb6^>FiU<$XoPF=m~m<_GV~o44yfO@8Q`<g7No
zQ1Q_{=VO=Z73v@89yfh!a8PuMCXb*>pu(X=8lizN7+wb6y%G6Ja83EP2{DW2tU4=E
zz`ueefPGd({mIi=^S?31|K+o?v-;0)V&}v6zxkv8GrSNAU#8l-q}=VLZbEd^y!crg
z8-CaCp10#cp?7h5c2&ppunXr_9K7Py*tFq=Ta|nj!>4{(Z}p~sIvM}WcQ!?uM6UI1
zeY0_8GULg^0@g+Qzn^^Hwe|7&E<2gohs1Z4wb#q7UX@i~x_0Z<1$s9p<eoh6nBmJS
zKa2GN(=?uUKAL4R$y_3(^hDwd$Jde9<fJZaS>0T?JY!bwoabw%eOoClf7HQ*@x+ZE
zLV+J#YZOaXf7`Ypr{{^zlbH<XH&6T-uKuCxU%teTIa%8$ZZq)}lGPXD+ceF3_A93e
zbz9$FW!>7lbo$J1(Gd!trHg+Cw%)1rvzo>5^Y@`?lkfew?ERx&%W}(yYwOGFem;?y
z(#-jvVc(s9ys_(#=KqxWC-a}-MoRsQ!0XH0lQTu{OfS53V7gI{r_nLy9pCeoFIYP7
z+_h&OOXl5@j1#*n?9td|zH0J?kGC3Zf{#bpwJ!b>ADe&lR>95>-_~Yq_`7gwSM(hX
zhP>P7?*{&7sFimP$bS4VN9o+@L-*t^m;O!rl-eg9p*mp&`)Xt6PmTX7*!G@#cJ1D$
zyKmnu=&F!Od_0v|o4=sK)Z6#Nx9-I!E@f8P%};t~a5g(<qS&vgObs^MkNVij{RzAH
zC%iE4u;b1RQn^PqY9{6#UcS#j_)f#!!mxiTHGUtS9&g;LYwdbP>B{@}Z|A9W-PYg8
zHTT`|9}cm{=heqlt@-xhuEU2IXPFB-4j)%)3zKV++d4n};aqmV$Z5x2F6X+O3JPRh
zlDXGE;D${9T84^un-^z^|7T#bQCyWd!Sv|4p2WQdD^8}wJZHY${Dp0|+K<Z*zF!FY
zb?xW!y%X7|E?3D(&-C~B*thest=-m(KUT?>T|ab5zj^VN{(y#+UF<z9rLqAf48Ll%
zTmLh(oO;N7Hu=Niu<j#kmT*S<g|=9}OMfu+{AIf>jOT0YF08Nd{@{Pi%F`!HTx!nq
z#K+%HP1q3s`(>f$L&gt%j}K4#SkZLoxc{|WWr6$76>;W98i&m|pR1%ZPn;9Jtj6fW
z+WRdva?@sYuYP52y!Y<C%M(;;3g`V`V17UA?*0$gnpeei&vkaWoUbmcx?@Y9+n*C^
z4Ho&W&dz)HU)yTWUy-G2?iDU}ZTo_KuaC0plx=t5ke|Q8wEA)WpGmUi|L*>0_)=4P
z&sC(rWuvRqaVZ`xg#&>vSPSSqpSyNtg!8J*@~YZ{iL6)jS!VO`T?<;yew}U2MF%<m
z{|r2zAFTiRdewi12TuPPK8hCm|KNX+EqN_cSMR;qoYtK?RO?l5&p)=;sEeWP(xqF+
zu4zYmC~e=~S$15+Lo}p{!9w)dkIN7J+y3O2ZNGlHFt=`%$emjjVFgxfECPkEdwyRp
zbY-YK-oB6XhriQa@y%<mtu9&oH2Wv>D*1o~j6a{tuKM1zPb^~}>(zH(d5zxvbKB8#
zL}{LDimQ%vkA}*h#gnSSi(MHC?9_i)KeQKpE%a*phP%$?W;WNi&CZz6*fYUV@_?*i
z)UNl3-}C?IKWr8ywwPs7^o?U1Z~Lt8OuDtONLu!Q<cWh{UdH;0Fg_KN-~7GhZC>)E
zSMNjjw;4~}KCe{ppLK}*iLUGU&HrTLrEdS2b@knwXN`|1MBU!BMr9e($L$I=ixuQg
z?Pcm>(9)irp|87i+s-!21yj8|Wx6CI{R13dX+>!;pZw~7^Y@|Y)7zf=@4RAiOx-R<
zsLk=q9ZvbV{)=i@KTMlmwY~go<!Z0x1<^a%r_V19F#a*gcJ-Bv1#F9SGFEhniZHBN
z%)sv$SNbTvsZQjh*pgpey4Pk`D(9DPELipSRY()#ucW<lT@0W9GjP?&d|2OV%5o*C
z@6o@Xd@r8wE%+B*wW@)w_MFGpb+6uCy7%u~w)c*p?8*zrRq~7^Jr1%hThw4PE&t~0
zL-}Q&9BWtKj^24X`nc89;|o+hk6)Sf;^B|LhuP74g`QoySDkk8{+aC&OCH`=?Dm}C
zE#&b2ZR}nV#<LaO2X6UyeqE~^dPV9<OlA;g?vWtYpmOb+1I_b7s=~cpMHmnNIR4PR
z%_g(_+wB`^+x8#eEzNi+`|)ytLi5*SwXe0EeuzJ`l@ILu_jG-!=VQeQaVOR^^tER&
z+dEHu8#mjP!SrH^n|r{C!=RPqUre9=T7STwGyeYGr*n3-O*>a~@#d+8maiL5dHiDT
z&-$vR!QB5L|FAvp57~LSM)xA~Wy`kSJ>5Pzf58C;{sofnV_QW67~>oX?LK3u`U~A2
zCh}rY7em)0mR;MX-Sc@fmCMZPL66jD`5)>38QA_aoR1Cu&%h-9pP^%Y@S+BrYo{NC
zH|eSgdFED@F`hg5&hP%860R@W(%imA=d7)zGuEzMB;m2hVb$UrTpL{(3jZ1X$b9%-
z##83klw#*fsW;KH)485jJ$NGEekIJW`9t`jTY3v+|GceS731RA|8^6btMbBI4CfzD
zjO%q}DEKG(BXUuN^O16?bF&MpcZH@zSIYWiNHiZ4SC=?&^5tvqNAp|lBtDj{Dz$Zz
zp0Qri>7?qO-Q^7iMjXlt3F)_XS7|WM*#CjAc~ynm<-_3{JWZ!A(sFqEQ76@nnN9ZN
z+vb-sy~_{Cb30mX`MzuG)m>&`_ad(-{(2iwucUo7V*$VYo*(uPSI-x|`Fh&3SI4}Q
z1;xItSuI>yaeG3@#6t}QmA^j6HM)v0PJLw0fAhy;hn*L<GHYzzd3(>1gV9eD4zo1h
zbuF;}aQ+|fe};Lxmj7eX|IZ*8x7?MXGX0P7e}?Az=j&GfXK3X9&u}C>dqsu0^VL1Z
zxeKz)nx37yrP3w){JHPyEe45AeC&bIR|424^WXe_=uzTa?}+Szat@x8{pZvFGsNH9
zEAM=v<UH@o!k4#v7ez-cw+^3ld}mUn`kiZ2MHmlMtbfSfwkLFz-AcvX8}}8@RNYdr
z$oZ8cOZuIpnT!8Oewa3y_rfIU*+1VURYW~{=WuS-f#&TD3<Xt>?Y_PU31C-#kl(&5
zy<yVNlGnRWH&z5}zZL6uVfV%!#=no}AKJ9X`$P7jf67<p2Qt0(+cR<diEnu#*7X%D
zS<+v2eX-JDKKsUw<A>L2zdhRutBXx<u~e^I%H|@;^7hb!blJi=DQUZV`Q5gD_|}=Z
z<fbI=)>Sz@CvN7X+sw1gcIsmIyk6js*oSqySL|CTvHNn~lB9Rnf>^_n?`N_we>oca
zVg8|ho*!O6vcxXliHq-fyJJtmo0+e-FfiEtIKQ`CB!J!WQ9sMeINjZs-j%BD6w2Hr
zsqiUALuIOZndA98q8Behwu1Y!-?fjLwy62t{I%ZTaSztwf7YOFXpiE5v&PoHShfzd
z3r!ZZXY!-|-==l@zXS)cu^-g`sbVGmcTfF`pW6G^q3l7se}OR|?v{IfVqv=3d#y)m
zo!{ojuHmrw^QuaN`Q(3whx2Mg|1;Q@#{c1n|EIL}Mh)u+(<zT4b@TNNii93LdHZL2
zUiRgT1spnW)|R}sSgW)4(e`c7YNnbo@}KgDWBrfL>b(4Qc2>;6xu-l2yWPJRCes}C
zC4hax`ZsSM23p)*`C2zex}f3Aj6dJ%zy5uF*+iGw`@^-Va<i929?#B}R}`P*U*!2@
zesNSt0K5KU{`P-zACCyG+n9B)##u1t&}~)67yM4P9ZEZXgVI{r^}@52<*VYCq(5z{
zoT6sSca{B<(%z&lhOT?}q`g7e+#{jW$z4iMG^Bt*wzYEJ<`2?`W-VK(`*+K)wi%x`
zruUr-<(zz-Vdb$}#^YP7dPNwYyFZxU+~0QHB_i*V{ubRM)44T1FD(rV+}Ux=;CTbX
zy6H#j_%7QdFI4Max%I$SPcetYg0)jw_&n^E?c&m4;@0+fqoe!g;oYavM>eWGp4=)S
z`DZm7KXazpKkJ<zn@rDtn|mZloz3y-1FiMkJzpH}?TXT1>f5+9`t>Z+XH&~!qVFEM
za3a|vca1?oZPxZS8}sCcx=i=oF8dt2#&neDjO4yFogxR>lWVIqn9th;en>tvPyNdL
zpp;#g-!I9%<ejSE0IG+K-x<y-Z>{62h(2^{R#wLLsMcSy7tdBJ)a&jIVE|P@Z{wO>
z8H(eKKG;ouyqde(Hg;W@c4f_#Pbyr+J;f7hvzIRFVtBUyA^WBJht~fYS`L?<|2BW>
z)8sR9ab|CS?>O&YHT~#5o{Kfv%Z{h6Iq*rvf1jpuQNMIDJNwHUA7fcX7*AK&Km6DI
zV7B&cU*oqz&J_pFZ1<D5U`bx^m~CgQ{{#7!_4@PPovL;BPPa6?<zvS4^+2jht#Q{^
zD-Gth5B-PYxqn!FiNAT+_Ueyww`U*cJ2`K+0|WE>p5NDtA=|GRpU5fxXPCU;!~DNZ
zOa3!l$Xk8=HRfh5_}XulV$d+z*Cg<|aL9UcjYSPy=dYJ!#|yleUgi68LUgR!NuJ!c
zW5oxbU)rLvfTy45N73;^RU%e)bJEN<OiML3Nc%Zs<4J~cQ~!hiI4|uHOfAk{YI@qe
zP+{ty?>xmlg$>$^7d06E@qf5lUhGBDOR0%7qP<R?NlaKf)lz0ct=6-d%Wl84vRyVk
zf9Kk@2_DLy`8-1d7TfLN+Ud$rIc?(y{X?^+E%?4|Qha9Lb6wdvUCPhbF*Elsu=uue
z@ekz(KfbtMuGxJkWn#*t7pH8V-Qr^_d%)*;fZ3)heDOsG(YJ3NZom9!Q|Nt`LJp_>
zmH~$6*NZ;4T7F<YSA4wf-8sA3W}GWZ-ZF*d`8sAz<rjRmt1quuz~_DNKLhU{>j%p!
zx13O!^!9poY|MsA!xsh&%rDq7f5jjEEA8_!_j>2|yRli@r8n!IX*`s^?gGPcd5teJ
z;i0Y!Ay-XK`=)#Eyqv#K=+>QM$rJet>L<SAPmZvY`ye*`;aaBOE}M4ltCl{R@bSDx
z?(FH=qCee??5CER1hC1s)@UxTuy(Zyn{jz|v9kBfImRiEZ5nK~_}PDGdwp<iJvwjF
zbzYHAKi5Cskvxa-7{ix!eO(O4KDxK%*hDWpxg^UiF;{u@WNy}+l_#Dw2fPxWRo`eM
zwCN=8m0x8u^JT1)13pO{yTa1sV3RQa*!Gu`okfobIof2Z1ZldvONIp>WRY9cVAFU0
zVg2F#^0(aAXR>{|B;CC8;ntZU%H?5B(wW~R<ffjk_<i`F{Dv!ktRL8OJFG3&eKGB0
zbAExJX!-UHdWFZ>EuQV(eZFy?=&ZNzbn;AnjqC6AoqNLapW*nF`Pa7ZQT=fJa2?Ow
zbtSJWQ}-r>+>N#S$zbd7fbj>TgKRCdIRoCTA1z-}tF;tFYA|L%&)NZ<x$}u1Z7KKq
z>l4t{f!84ta>M;7O@+G-91ofmR=$3Izuk`E!~LGE{o?1>MVOwM_G?Y2r0|{!)*176
zxn(flU$mnw=V!3Hn$??x!l3Y$Kb}8w7gR_ex>T|Fu+iPjsHsyfN_53+X20{$<7cPk
zbK7dEucvFPr0v<GvS+=ycvEw8SM{{XjnYO<Zni~*eS23hXLWrp{qgj{Vy(6v`}*$A
z&bn(myQBZ{^)KtL{bx85<u-kxY~JU9HeK`6k*vqwtk$l*yW{uG69;57nEkpwug>qi
z$F*Z;pQhfrw~ME4J6>3rby~Roz;6EH*t_qRrY_IC^vZmCpkumFWxw?XF7fO3&rKM!
zx;~$-@14gn@1uhGb%CSGIo>PR&Q(nQ_fe+SL-d8ms~78fBqLG`ELmVn??o8G89#3S
z=gGJGKSSPshF^A8p!r&qCB6*#KZO5zzHi(AMl}0B!=h=RLl`(gCr%uW|HifTKf^`u
zSXKtKdUp-VDGXuw_I*!bybsw006uvPJW>xmg^Vb6_ypARTh_;~S3o_u#6fO9^mHCp
zD^STs0agdMGy)yJ`(InS0DPv#&i@Q3&5Lly7&OnWU`B0WT&L>sEtmy6`Yzn{@er4G
zX)NG)4mz9S)sg=U1+`gw;RjcUka5V3Xin|LFRP}4$OWuOXQ?osuuqr&slNR`!#B~%
z{~26e<Mzinq7NJX0-uBf%F5c$=eOSz-2YK+^P`u!Ho>biPV=o++7xjj#B*Anzisx5
ztdF_Ohvy61$gj#?zc_Q2>d}2mtG-+An!BBG$K!Vs`*<0`KF&X^o9%vC`pDv=<x#I+
zGTi;!9T{xmv&!fO>&7`Lk7a&ccYNe8SP^|xo~xub`>4`0ZR?dmp<A|ER;I_EN@o7i
z=3IK*F|@Q-`O)#79C_C(`!|&IT0MTcbJo?lPg3435UToBI6rFtWBIo0ML&WMPn#z;
zNp<&!Hz!TEl&e3T(IdR$%gF@(<?2fr(taF2a!=`+4g2a_xBl4g+;W+>r}dqw<m2v>
zy&-cC>FsWi;X5uG{ZEklKZA4)<A=M`=9*+z<|yy#RkK&wUB2${5A(}iSFXIZQ?EGh
zvisuuH70ABZdF98O-=v2WOo0K9V%KIA51>May6QtrDEUjhh_7Z_<Ve^>t1h>+|f&q
zt9(|U_-yus!PBPrySvN<*4pZ0+wb2HdTB3cCm-*)Y)8nfR|k)5)Z^zdJXC!7nc9jP
zpZ#0sJN~F%`oiG!su$(OP18<m2A6DP3F+AQB)yQ`_V|jmZ{6~{rM~qRFFgP7tk{Lv
zL)SjXOK&(K+ICHmQKi5?`OE6>{~5Tx|FOLIh1WjtQ)#T^(X&%O#xUAFI<7oPWKwNm
z<8_&av}~*Y44qr|^WVwX9?$vCcg{wMrrKRL?#CDdH#b=*%NoYIAKD$8b@<`8`HAaq
zd$Yb>_ruA{F<-~<=7n!8{D*e_?q1*ht8c#Agj;64={hTtYD4ojt>QSC>fp`%qfYc*
zMZA-o^oP6CCVyL*H|ws!=Tn}UGLs@c&wKKqOhSZlJv7fV#;_l||5L^4{omdH8D9K&
zy?-4u^59kp<9@W`cC_bzQ2#UeK5zXEt?U08Ty;xC7$-j3?Y{p*-S%_=HSccDM*-H;
zdK><g9{a<wx9{~s-xdwWY|rJb#}^uj|MSznlzqg+d*O%4kEZ_XTp_Q1xa!Ni+9_`j
zIsJU$pJ2S@caC}&!#CK0Ib@yY`|<ZPsrhf6mvPkPzuBwVa^s)On)!b!>#e@ji&V_^
z`_R`|y!us8YnS3D1$&eAH^r~a|7govV9)l&`}ee~drH@;-F1$gutm%LPP3)NH+6|`
zuC_0juYv{-K-qDb8=uBXH6OEg#}>m24RA4sJTXF$t?8ihAb{lp|AfC;QLeG8SfOY6
zU=PLht_+pi4|Lynw@>7TQ;XP^Srsu=!ix1z9_>}z<Lt6ap3_b<{>7}uPc`Q2J)ht7
zyQ88%ujL!Fwg0w573=dUaHO{MQ8~5Cdz2T%y!tg|VNyp;2me{^Z?0d}|M)vP$Q9Nc
z4*t8M<RJgqztfi*%$7eqb@G3Pe*Nch2jc=i<h$(OnHzmP>Qe3%)0^B*OKMLTPwY=B
z>`w{4w>J*n@KByn_x|Imueohc6~3=pBB~<A;6*{r!?nXgG*0Et%{N*{QOfO#`)__*
zMPy3-XJ~uJch>84P^D_;4hyZ7bE-|!Kg(@>Gxb@V^^fuu=hrUpu@NuQsd44_`=o|t
zw_LkTx=iuG#}>!3<oW(&Y|LK&rd~4g<io#7HrhX5x#-n8+N`kqQ@<sH`O1qGZ{I2G
zZdBhk%W`4ilu2ehYiu25q$a`ri9Hc7DUY}~_lDMrlSu^(3s~11|M<A_cH|#}J)5Q;
znD;MYQKR+7NYV3h4f?M%|7rX<|M0I={DzE!U*E=_JMnhSzLdK!Vwm_Z7S&Ci{;-}g
z`<~iI6+6QpEwAzv4@bQ8{;u#`VZ#f<>oRj57d{qY1Z{4f?e`(CJ-J}1`nJn&A5Qd7
zi?|~Av+Ks9E(UGxqx*y|?MYvFa_Os$V$PpkwF*A$PYZti=XF^Z!$;{{???QHQpKYB
z=KWLqa3{vOl&$7Q@CI8ChO(T#gR2*A`0@3D_m1Y6xY!%pOLlB6Q~vjZNBeTN-tWUv
ze9ILl{bz8woOkDI=G!2rdLa$w$^RKzZ-4mwuTB2Xq#gem9+%V~di$gDKZ7&>KhK!@
zkGo#jKV<n(|Cei){6Fr6{}~=-{AXwp2AyN7l~Mmgul+wm=K{X^f8zOv&;OFB|K#=f
zKSS@u{|wyQA3p!fWB*gN@IS+`*9$Q8EdD3p4?6sG<--3A4|d7_Q){sN&+sF5KfnEp
z#QzK@wfp~>yZ_O<-#-6~!GDG)Q;+{=kY3k}uBZQpqmBH32B&51{~4NI)c=^p@cci+
z!?Hg%{}~px|7TEHXaD1_{D<9ja`hJugO*6h|H<3MkFE#Zd(87k0|X_$M*{>c15hPE
zYX0Q@s9?1JFd_X$WK8`>(ase$m6y~ezo`?yWU8C@_eZagP*?qI`QOh!uKQ3Seu&Fa
zPV~Y-*<<s?|1$_jU8|~{HdlOVa(aHY{C9=UfI5c&_DL`0`D@%C_vY~n{BfTec29WM
zb=QtR-TywYG}yX%+MYP3ePe2<V4#TBiUSM@jNXuuNKhMEk)cO!DzlUO-6x@48cYjW
zO4%l}Us@OFAo8JB<KUOoQvz6R<h=}PwVsMF`m<l^k@wo^$`E|;%VL@-3Y0d_KeycD
zQ1z@@l+G1n>!bUenQ_J!o?QAG(XIG%(Nv3y_|2MM|CK(yc5C_(`Ib{UTV-wk_&(gR
zqwN*X&kf5HswObJxhdNmzCF-EE;aGv>jTpZ*>>#Ny)o})Vb&Y>zaJz*@1vGEv8mtN
zw#fQ9tN!a;c6aBi%eR)a{8I_FM=vu*%bC%F7p3+Zt%=a8b7BI7{c--`1DyXETAJ++
zoqGJAp#!sqpMqZWImpN@{P^5smjA{d@g7&U?*9JB#KTMTUrzn|`NvihECH{r`SM%W
zN?+^PxqH{_-#H2`_|vf3grRFy$BHh|kOT$;2GAfms7C?9n*9=m?_c{bYS1>x*}M76
zvV>our!<)6@h_FIn_ASxu)O)D8~@UnMGcw{eg!ta4A)q|Rbe-UW{OreRZCSJI=-H1
zO`rn@LtvvfWYio!*c9fr|6#50QL*`H*S6fyS7v^yx8JCK*Wsyuc(X4$$l2Q;taW$X
zQN5n2V$*l8G)uGXJW*<PPs)C+4rcGN)31nsdd)^ZcYXBo$Txf{^#_grGc4fxW~IS=
zGUuQDkIFQ$U)QFtdTGx5US~sl=%0@KKV`f2+CQ4!_|>L%{*i4O{@HB?@_#y5OTL$x
z&1zATz8#^LWPi{k@%ksz(^>LdHQXzooPW66M1AiLgZdXN0qp*nD<iVFcb)on>uGJ`
z)R?-qwc%6l{yRU9XYTfB&sCAz1E1Vs?*IML^KaJnKN`PJ`~PJ5@t+}f)A74MJCFbU
z^cplWR>}THTfd`h|Hqdftv<z+e7<=8bJ^m50{43a{xhuj_;&JthU*dgj~PRiD#uGt
zsC$2GmvQ>KAL0KQdZg?hO8>a`Sg7K<Yv8{#+W#3&JZiA5>-xn1!}*_~@4wy0r*Zyg
zh<PM`Nzwg}c7Dfq`;T8g8a>kg^`OswvMxi_U&rF=?3CH>)+ILD@6~B^e;w!lpMlFi
zbk~1|3CoYf7JZOgtYiO0Q)=0^zw`bmOaGi+UbXf0ynVTc8tUIqW&gT>Z-2m#sSnl~
z$Zz>E-A|=7B28HEhnjCu`F{raKk7X5{AT8_%$h7G;F!O~{O=dXzuNWwKb#-fnzQNi
z-9B}A@98}^|Hw@I6&{(mH1k$WUEKV{?6moJx3?zCPhe`WS@7feqv@S>@%$B<COhPp
z9$R!=p{n<G{THz%@hx@gAD!3iv;H{s?3u`un>PMT{_7C<pP{DC^i4?syY0jE%s+|`
r8RlNxvLj(akICE5r|yT_JifiO#02U3736{LQ8r}qmWtsZ|NkZcv@92<

literal 0
HcmV?d00001

diff --git a/docs/_static/getting_started/querying.jpg b/docs/_static/getting_started/querying.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..3c241bdda5ba98d3f3ee2e163b9f19280c5c7503
GIT binary patch
literal 50148
zcmex=<NpH&0WUXCHwH!~1_nk3Mh1rew;7xnIM~?O*;qN)+1WWcIk<R4czL+Fc_f8|
z`9)-<<mF_gWMmXn^wbrUbd+UeG|V-13=B<7Oyt!qZ7qy!^o&i6K!z}Ka&q!;^GNXW
zN*F21C>oIr{vTiv<Y0WnR?p0+#K0uT$SlbC{|JLT0|O%~BN#A10V5ML3o9Et2PYTz
z|04`r1sIqZnVFebm|0m_SQr=>YZ;lC8CV2ag%k}P*@OcV*_8@Kj2b5{<WP3ncu+Lx
z;s+Juq@pHHE-`TlNhwt|bq!4|6H_yD3rj0!7gslT4^OY)kkGL3h{&kql+?8JjLfX!
zlG3vBipr|yme#iRj?S)0lc!9bHhsp-S&J4gS-Ncbij}K2ZQinV+x8thcO5!><mj>E
zCr+Nabot8FYu9hwy!G(W<0ns_J%91?)yGetzkL1n{m0K=Ab&A3FoS&sA|M_^^Oqn4
z6C)D~3o{El$X|?1<qV8W%z`YeiiT`Lj)Clng~CckjT|CQ6Blkg$f;}`^g%SK=pvVx
zipfLOk07sseMX$en#l4Q++zrT-D2QjW@KOzWENzwXZZe0V^IUwD5hN)Xi7Hq`MRac
zPHRwIwD9wPhMRvgAS6?kmc0Gn`PceEM1$Jkmo%N}AE)KFg(1*YWE9gd41!}1>1AGx
zOP+pwDTBeJ<7$(-G#D0<ftMdwbbjlzs6j6<@KDi=FDuD2g&dnlqMlWo>epm#4Ndva
z5OF_>Ay*{OffX5x_CKl@&fF(-;n~%#mRyJBPp#OqrLRh>@5z6L&KKIJ*6a!V7=66!
z`aPM=n=fwJrsAygYRQgYJFNFDbXdTjyJ(N!_apxqME`I!{XP;_R&{RLlFCa5R&nvK
zVhDc~()CrJyJE5L$GTR&)yq7h)PKfGw|5>7O?+j4-~!uKWJlm&3p-w1mDR@A8yvvs
zDl&>`6$a3>fisQBiGK93@3zzXQGAH+jm`w+GYcN8cBb3Mb{T#(eP`e(x^+*1qttO8
zFF`>zrWFzl{9Yg4OYivb{Vngqzj4o>Zr$l|{fsSJxl6rHQ{C2gTNq9*?0dwiILmaB
zrpV&OJZ(V+j&f5U?&oo?aDOb{u}4{Y`;And4+||1#u(K<?s5G3Xzx+=HqVq!Rkukx
zl^tt1eR_KOZi+tK$6xqJ{+4m<)wTPwvt1@gvO8}L6Fk3<Q{Ic=^_P6fSKseTna<}v
z^I@*hr5h)OIzBvjWViPI`&sWxE$?0T+_R%wE+xQvotvA}B<&M>O#d^e8$OEv_1XTE
z)YsmU#)p4B<}qxY{-43Ygz>~5m5*sFAM<mZUzzn@I7apHA)d?)xg0HpUz20LKJTxI
ze(YQK-s?woey;4)**UXAb~3EY|5n6tJf^_&_0iNn+S4-Zn13wFE^;}MFUj#rB6^u&
zL_Lqbo$}2eKK-08X2o8c_nq&o&bx(YW-vTA(oe7sVD~M2Y~K;jpZ_-g*oIqKRVJS}
zZhgAQU^qXA^|{Q0^%f7VM}IUw`hB0w2bHL2?It>(&I<Io`Mx^wU{3#f(W4cUZa-%4
zDv_QmANb+k#BbTXyL*Zjq$)NuaZY*gq$<SD`tnv=<!@D&O5P^vYyG<Q$l|Vf`z>a>
zdydNQ-Y(#tT=Du5d(YgY54ugdb9ZU^uQ<`cGU=`Kw)Z~^_!Bta-`iHQUpo7mz4+4)
z)Ap<IW)=!ot4B0V^0>28c|GIxSK1pssJ2+;H*As45YXb5nDd}CE#OSOkmsN5rXP=&
zo=@lNeQax6SiU4?!Nm6O+)PV;vq}7CXuQC_Dl7a$W>oCGg+;d$wzfW-@SkDRg2{hY
zcm42x<S$UM-C_5&(rqoSst5ZnD);A!)_pu*=V$*wZR0-OA9LbsEHCSpEQ-q)vhifn
z{5i|GP5HCN^1rXX*z;y=_)&Z~s;-}nE6QQxH2EWUBlin={>fkt>(W@jgoHU4ytr!W
z%<i^w1zDBZNUT!G@iV9z%3m7=p%~1NA^;Q_e8!L58y<eV-+pw{i4CvBj-1Ri=3}%C
z-G9?U-iP^|VT{<!kJd-)*tf;1AFGqvyW_XTP1!H8vF<+Gxb|NWe1EpORfO@=)qhLZ
z{b)ZXzUzKN$=teg-X~_iZ7(eNDf8{n#|tSB9_LxCcmBa{*2vabnV54)ZU3IVe~On~
zKVT8N|4q+dM|E4*YSEDGm)_{+zsq0y=0Vr1*ZK2}6nNF{&F-&BYp^LkCU)z6qn*zF
z-YtLCa=m@_wzwB;SgC3k8ph7>_IKBTiOrYRX4zSP<UgWo^w{~s{kHdOl6Lk^dLOVm
zMkjFMXI6%UjWT@2=lvev)L6i;^Fw-ROw=Eny_>JRNozP=tM_7^l13y?VPE^kC*ju1
zJRjvZ{hIr=@#4DDex4Ygw6^R5Z<nf+2Qo4Y92V@7>}#vniZDKsdfxjd@T1bz5_zs4
z^?{cc&%ORu{5#7r<$&Ve!>upRt4ur~y76PY!|vvXqSN|QrdRcRyX??9RmxN4fMs#d
zJb(M%s=XqNQy+HRljnSK(W_^YY7f_`!>k^T6Wo)7Uhi^|`qx~$<WB$H*h3eE`*%+~
zDEHvOKhcG8m5<MNL@jSyf8X=cAJf|Ua?>5v)ib8fGI-;0kkQZMfW>*sw+?dIAC`Mx
z`FdV9^5&}@)$tt1wY0u61uG|{U$<1gWf<>vo%6%uL*jD6ALjq@5$O&4=UiMmdD4=<
zSJihiPk!fHUV`3;XEc$OTDBtL#Z^-VkwDi`OoK2GoxZN3-|fTInY*u*N>^J{OGZD-
zPmNns+hDrT#x1)tDKbv2R7<5_db0A06UxeW!cWwT{1JX+&y&5reD&QsS<|NTc9cp=
zY|Q@_ZC~2H+}}Te-}CxDd;gVLf*Jp~E}P$pIkmyHw8Cn$4DZJN;w$qH?VWc|Ghths
z=5ymS9jjH&Fc=xgY-49upQrt7euJFUzv~}Qzq+jUI#)~IK*+*qUnu+29e*9xFIU0V
zh7F(lC+_3CwGY;E9s2C?$>!#}n&qDt-!Xfu-sirQp-NTsu|U>+>36x0=jkeYXFPRy
zQSM^IapFJ2w5jUf+K=o1Vt!fw{Z9O!q6XW(uC6<$a+8c3)HG}kSg{?qU+ax%DvKV8
zcpaxb{n5IGOYUU8+b7*6dm*-VW<&aocDZ!tlliA_?XF_bSk#3T&*JM14lyXnnnk<L
z{D>YXS#_)Xp6jwZ`{wf$$F1>6=nB8J?#Bb+8)*%e`LZ>xm-h(hJeb6(|5f{>XyV50
z_Ipy*S@$oHJukPUWc#h!X}gzrrmu`#b8Wj}ql)nQ<a7KB<rXiL37=bI^`m};`>L~#
z?Iq`Lspxw#txx@+*y_tY1=2D+wHJ@Ct(wYEEXVP|ZuSFTvscG`D^<L^r_I^3;|?Q(
z*7F?<^;UN$|4pvVx*7_iG8nfY<yq0k@mu2mbj|YrTQC2YarMpYWBW7q{}eTu|LdRq
zFXjOD{3R8y4+Umy6Fm7SR#jH_aah<M2S+X=e$zUAnIC?YlO$?9mz>n;5f#k4FipLI
z;e6DSFZ-M0Sa)8^c|P5}Y?6oIoxkf6Cxr<A30;`|q5QzN@CmVwORUbabgSfkTj?k1
z=Xfwb&_T3L_s8Xs4{>vs#=Ptos$KNrxZxz*{|pL}k2bNb{1|-9@IOOVMMdT>waMyd
zGqxv4ruA=@S)FX({`kt}zl+-5GCQ*`+>`j9q3`-;S1D&9SE<%s5v>(37+z>FpQ)4m
z@jLd4jc@%!d9mBBVjDABuSHzZpTTCtAyblYppRqqe!+c8cPf(Jop-vh=GuRTBjPtc
zElTh@#(6-6Z^@hoDJ3euKW83)>#k;fv-m$l9joJ#r1#$zT#b4^z3I+R#@$IO%<><E
zj(=RhedfN@mf7`hqdT{(b+b73)8vWk{p~B6vwG$)mOr-kkI|2bD=Qu!opnT5%YFG_
zj%oLF8>~_r4(9!Mwl{2#`$L}s=a0?D1#dGbMy7I!FOVyIP#wVTzwF2P@LfM{ulf<@
z-u_$f)*Vl&jt@U?N*CP^GoAX_zVp`h);iT?6GeCI-k2Sl@iw{t`=m1mn>6wkh}{k~
ziT`Q&xBB0n<NvzaX0CCT`OmPGuWOS=s@U!WcB^@o$e&?$5X)yjc7K!Ss-?%K3UEKG
zPdn0ajy-m<R#E+f(rbT=KAx_4eb~3Wkjt??SATNR#OIk7EC%Nt9ACcTEt~piKa=Do
zdGQ&?V)Le~`2O>wgVa97AB~S>(~p+RiGAQ*A{-O-BrZ$%vy6+3fbntmo;gpRFdoa+
z5B#WpRG*=;YI>*G;iyaIt#j8-(d-a@CF=2Of_vWAV>Q})KmIYZTJlf$(w5&(XFaH0
zmA#$ud*cMl(#MnPg=5#P)jt-S?Xl;wzK!vs+0{FuB+X9EWNb6Cuf0+#_l5CwmEX$D
zwJ*(f&Fz_&y7o$v$L)Rm$uHcUY_pyg{Ab|&Vw-)qj=kUex`IHAbD?4{OV^Lyj>J=w
ztfsAdgi;c!H7^di$|Eba45eKTY8H-S)V>Ni;lWpY?4HTx8vX^ID_`;Ma-O<<dvN>Q
zzw8UX+O|K?I^VLj`)>82Z&LFXF55S^v{3H*_To8Jf_IoK4?aoTQLWz|wQlc6vG+&j
znNH)Loo!k6vaC?%=!T*-jxYP2uWs^ZNWTK<H9?y7;MRQ>>KId{`ti3`zk@y;n=acr
z_2}ibFViBF(iL*@Jec<8buoO5*6)#1+Pq==*8dEx`K#<F3mxBmW<ojx&*PKdR{m#r
zSn{L$U+1!w{~792{xdu_eNz&^-utIu%azjo+&`+Kea&7)M%9~nEbLs8@o6p}d*0MH
zQvx02TxR8d|6{WF{@<?WZ{kyq-?CZu>GPAu-W$iG;yeEFUthB)KK=1|PUmmkzYD}(
zU3H6cV7t?JwK0B5<9TMedpqN@AHTH8Usy5i_z^q#=w5%1UbDL|3+GI#WNBclWGOOk
zvXofBGyTWvM|YEMy!@r#Xewcv>2}`if%#O<mph(1JigQTXSMu;WwY%jeiT0{CT`r_
z-CfaA>}AQT_Hp<8b=g(VAN!s?x;px$9nZC=S0fUWrZI1fzMtnQcSF84^N;F>w51Pk
z>w1S@-~YsT_33A4TABij{4E|A>UbXC%8-)rEB8~~C8@i=Z^o>heT!i`o0{OpPmd&c
zd*s?bExl55``O>6Cf#2*?KJbuIJl0*_i6K!QwGrzuiT%<?RI4leG<>S<@Q_KJ^tTr
zYtPO%d&^T-;xqgB`4cCe?JetK_{h1Y#{EP8(O=V+F3A*nbx-oj{WFs!7<Cy>+BRzU
zOyDzcvpnx<I=^$D;>UTj_1YiWb7xJ6ja@%g^~&LlR)3Zzi#Yp^iCy1zX)yC2l9Kr+
z`(bO}%+hkzyH`AXKJT8U_vFcgc?HIeZ&>q?szOk6-k05N<thm)Q(r`53&(IE%D5%E
zn&cndN2AnRb@p9fn`Lmzt$1U^(<gRaizol!dTOUs<N2SVB`u>wm+SU#_aoP52;5e<
zqjcUa!TJ~fkq4!xKI}ilzvbS4hC_~zUr%0}9rWiIr*w(IFHKo#x%nk!>}wf9^Ly+R
zKFX{WZ>*6lua&S6FK^DN{Lau|FD~*h`{Vh*pa1SId|COQVOnb7#{Ud``#-Ob?b3MA
zs?=uEry?CH=)h&mAglR>@$r9#5B)X8{}~pZcmB_?U;979i7)ksY(Jj=*KW)I@6Y=G
z49eFx1Ukss|7YO%e%$_-%oqFLpZ5RhcmJb&U*<o<i{cCa8Q!1z&+y5%npI@fw9#-I
zO(COsVYCbxEiFguh0*$SP_-PS`TMHs&;MP!@KwZrhH2HJ#s3*5T>c4mqUC=E?);;1
z5~cAnKeQg*<9WDVr!GZ)rdIv?jd9%%=S#kj)8A_RVpmT%t6NITpH1%nKFPUXU%oeQ
zmrlotSCSD4D<xUNr~lae;OW$>=f7QVe#h6m&ZYcV?n#dnyB7Z4^Yd&S7w`uz+H?3o
zz0kXNU&}sB?^)Y6|B;8^QI%Zp^@TSKWj-E!`!a4m&eo>nt#><@|HyaWakbRDwdoS;
zX~S3ksmwcV8P;V#Jl_4aUn=wZuBHDbOtY%?bDQ_O?`86fBL0Q)6$M2OGNJquHP%;b
zvLE}}<%S1^eb~FGa(+U;)_eVe<2&r{y<&4<wp{PjCb4!wABVEX!d@pOg+r_Q7??LK
zV7~}mwh@}?regOsZKhIwoK+XYTJmRnp9Z~cbPzgZ(DG~6v<VChOw&+$+$1fmdRnJg
zaf+Y$hu7nfg!Lu&cWvq1-xqw@?(n4A*3!_&Qn%_4)qZ?;+O7UxrkzatTjAv&w(OCs
ztu*YZ(hgv|e`r3_*(FgYANAIk9`7{uIjb`1(*@<??Kk`D{pRlca*FcqEjeA{f!4z(
z|0w?O51Z`8oVgp>cBYx%xT|@qoUcsyg@4`g`7sASEI;<SVzbk%(;;lO<=*w1?;ShL
zbK`-XwQv*v4!cFWe|_v1J^r78L;l!X;Ug33-uqwQe^I|dBK9X&#II7f{|pUV8LDm3
zyGJ5{4uY{U2UD|kuU<LHBe1YirR>nEK2P;ad!8Sxv%R*b_;K{hlG7TO4~eZv`t5Qm
z%ro`P>97CZCpH!MX{SGuZ+dT2xa?-;#%!U8NZkX~Cngp@KmKxl;0-yWqAISJCbde(
z9WUj6Ics(|_pR>roWk{=?UsC1-?@PM3R-8+@NxL_g<n_n=vXCO?b2Y{KIz~5wQ}CS
z>b9nJF|2*EfB9>7kVxdB2JK4ym%lE8M8Yx_aNYC%RksBsa&^{#X=+TcKT@wXGeqNn
z$QH`hqQu#Lu&K{{WoPtJ`p>2KtW;*X17bDxj~{;$ePjCgKf~tf_WWPP>?>YvO8L)l
zTVn<D4O|lo!(Ckaw4MEp?)h!U-W00#J_*+5TdcDD-x+(YuU4P`GxRQyZ?Ss5{pNp$
zFEu5MH#4>`54uqD`t!ACw=DO+U&{XL>Z=1EG}Rw|Z|i(jvO4~+$kzQRMLxUNr|k9e
zuj_PwwB|1OA^GUv`O8oFS7r&R{g_)NDz|urgu=!b4wCB~Kl*j5cl<G~U*cMC+I-g~
zAjbLo^xJpjS_&ulo3c0mQC?P|@6hdaIr%}&WuL>(Xa3$?-=27sAs<$tqpV4ERN$H1
z)pKU?l?yCWLgK1^N!fh;V#25^Dn4&tTfc2BFN3$M>+|&s7oU%Jb`ZK4So!)z#^>u7
zF0gE&i(-+ZQ=|9UZTZCjPgd*fkM0+)(SLCFVwU-#=nEE`x5=gK?|8BQgOYZaoY{}L
zkNtm&))>q6C*3dm9{Br6S>g@`_lwc59pp}VFsnS^dT#13U*YX`NB@uW@?)>Bna;58
z|H1X--_+-iA6(n}i1}H?`Qzf>8+T@BJGmski0=PW5bn9OM*M@?^sW0hw%%#}@$PA+
z@z<EQe>Iqm{xdxO&(M_18J&Jb|Mr64wi{#qGu%(j>i?s+H|jsbKZp7JrmHVhS+g<!
zXOMOKSNh6KN^bH}29vjXr_S*I{JuTnKf?skx(1v2f5QFU<@Y~6{P6D{&zIjZE%oMJ
zc6UFXd;c#nN_JlTt>FGY9arCo1hDu2ZGJrG{%7$EZ~rqq;Hv*NbMbHP$8+X?Hp?h{
z{BNed{Z9sPTgLux^T%^`^$+zfJo)x%`hSLrN9AwcmwEV~VaBy~`#+5C|1OI{Ok4hV
z?)pDFUX;)Opep&F;rO)vA3pmGZWh-*(AGTg`dei2{8Mi06BdCSD6Zd;WB<hX!~KYD
zf!92~PyBU^{dL%y{Tce~e*a>hM9ppPlRm$lhyCv-&%fGNS1o-vefcZTP5%yEduf$B
zPqgq?7sJ6z|5Sb$M;zGvQMNJd#bnPX8-Hm(xbD|CY0;jXbv;IsevYC?Ud`vQ;a>I5
zC_^B6=ZkMW7ao5I692L;%D?e@l<}5({2!;ST)MBEV`=V*;}@oCAHO2S@J&kl*!d1S
zy$b!g*>2lD>NeHBe6wS+{09O4XY+eY<%ECie3&olHLbhoR^1}M`<vn#dZI5zFRnG7
zpT+PVo}3nSF=!wE&yX(vQ+-zZ-`14>3>O2#H-z{9J7@pN-<1D1*UA43ixw^5PrzJX
zl5OC6ashkEGRc7Q6_Tgt)obN4M1G!p{mbHW{_&Sq2Rew=Jo)-p10=HQN&xG8OWS%c
zn<roYYHbx^Ob03H2Z^k8rMjktN^-q^t4h91SkFGuK`di|fSbD)@^aQT&w{e`k208o
zPVMy&xo>J)??nSg(9gSgY!X`d-v3-xs8!L<;DZaeLxKaF*ST6@Had;_wp>r>o*a7f
zw45)4U#jChTEz?=r-G6R=?N$J`j_Qwyce27HP~e}n4XG?6utWyjl4vl@;}3|`#)8z
z-v8bGpW&rT=?l^4{~4s~pG<hY|J(cj3`+$susc5R|EWGt>OaGV(DnbiqF!jv|DgV7
z@_F9+8(P=@Gq~!OAQrHPPpNVKcyDRtn&pjqgpS1)J$SooSI@_cz5nvg_AUQWx4b{J
zM9A%gH^YIZkO}S=`lg<)(0}-E;)9BJXFI1odi5@>GH)G6reyJionPcvKKad1A{8TE
zwkvdv`1ktk-F>Q&>&#d_%kV`rFY}GYvT8w7vT2>KuA_14#+ME}X?jJOThLYwV`*Mu
z9i8`-Y4zjxe;RnvL0n^*%T52~BEJ~4PwYv2EL(rnPHXysd#v?p5k;oD!4?8#T1!ff
zJGomvNM^ad?s`YvxvJK==Vq^8ylZZh_ePr|f1mgC@$cZ>|L@oL1$^fZ-sjw7`Osjy
zF8jA>tNfnt+Z}Lr_fA(EecNBZXT2w8F~pNSu^(<9jx%mOw>W9zJ;6`+ZZ}M3epla7
zZI-=uQG-pm=ST65I*}hu*AMU0-(}U_o;N$SZ`I8+agCjCtefZXIVvRWUA1TP<G<(T
ziyw|u*rk!TY-jhIuB!L!I_`g%3(oJ~TlSB8miK(7imJ*vdp2cGJNKq)Qc{pFLmsR_
zfL`paPgpPicTWA2d0Fz`L?{1eaCLQ%JBZ%@3w@fi&zfsvsPudJwG67nMgmlwu*2Q(
zk1JR0v471P0iS7QJ8?T;-q*0@^WXKabwp_(VU16yQH139JGbrlViP)Rg;m#f3`0ei
z=AQ36G}Ei@<F8BPH>5AEU3q80Rj-ZwzxE$%P=VDL>+OHs{?DM&9{ZnRd;MRHzP(I)
z{xdwz|1(Lp{NLUG3||&mF}$$h{Li5B`EdL<uC4zWE_%nZ7XM@U&oHU-;ridKQU4iU
zEM3<GDYrhK@2nT(Z++D5dNKKTdT0KJY1dwuOKd(+CbzG7|Ap{{*UeKO<Z>N8X1-6e
z>+bE4-2swMiXPb+crkzXlwiM+kRUvPW1swssPpZAw0YO<UFEwXOUT+++vE2U*PR;D
zD;b_ozB8vV))&XZ_E6A*{i|84PX61tZwtdMV$w7YHAvIz`=WlNKgdlF{jg-7_J?&#
zuFX@}uz!+BZ-?M{i9WO2=NX>PUe9jxcx|nBoc(W${|qN>uGqiPR{z)4)u8htUMhFN
z?tjNW9-C{{xLS?b`q(jXyT!YjfAoJ_efiD$wWiV`J2&oXdv^EVn++#q{V()st=szW
zukw+ww965f*S=4Dqv|vDd+=5N9ZGwyeSVM=ReL$^hxw6N(^?BEKOJek@H<7|#;<7$
z_+u9BDTt}b{&4)X?z)+eyi@*}DA?Kaw%a!@=kF_inWEoiBfREiOF?XSf!*T@|7RJ@
zH!{{+Zus%^f%Y5rtEyX)|Co!Nvkzsd+j{ZlkN*sZ#DDy@@7%IXebLrS+I23?4zjYM
zaku`R`@o!C&hp?t!+A~SdgU;b3cGrDcIe&0LJ{S4DFyq_HK;Axpl?(7(3_#|@*`h0
zBm4HZ8PT8EP0!u`lk-8eLm^x3>G2P@Z=bloj^S?x^M#D{^7Z{;-`^#FxZbjLZInZS
z!E8tS4-C)0Ec?%p^C$hIzL=ES{D;5GUz(m|bN$t(Q~&W4!`GL~yKafkstnSYkUGze
zYnAQEmvPyT{Clg!=G%WOZ%>+jL~8TgiHFlT`^$eHkiGvwpl;FT#2?!A>-?tMnSZQX
zxN1Uq?JO~EsrgAaAHP@~*ZuH4M@9Xi4V6)P)AKVYwoP<<DgWxZPQ9>$9Pi_Pwu<>d
zAJW!8?w7v%=8BTXw9|eaf2O^<tTg8<bDK?3_2hZlchRy(3|g&rp+fkd=kvDxZ$z{I
zGc20cU<Dsb`uLyW=J9`?&ldh?NQ|`q5)%4G6x5sT|1<fl^S?uz>R+tD(v{U(`1|<l
zuvZ|`ffK#BBfw{V_@5y{>ugy2--OjButlScBL5jmYO}cJ)$g*~%8GNbXyvuukM-_X
zYjXA5RJP2!R65VQ;@QM2OC~7`_UAl!{6Y5h#q6gVHp-=s`UNWPJ@&0+TfQvL_UxvG
z2Wz~3PpRM9!0}z;ecZ<n$B(^yoX_oUa%OSx)+6B^7C$#0etfav{B`B64Bn6Z<!}Aa
z54iYNPbY0>uAiGr#tnuU8kXmOKmN4t&>ru{`~URJ<NVLCzO(*|MfQ!ax?bnqFQ;EI
z<!;%&rP2JP_LsoF{@be<wz^jG|IvHB*DKS%ZI9rpSJx(*NtNo@ySg8LRL}L|kMKu-
zkB_~NUoPX^ayQCma(dy_jn^~ZxfeLt-3eeXcTK67e&kks%a=@3->704j<QuNPM<iw
zXvI7K{|xa<EBDAtZOuMnJ}sVqYjv-N@V_ZN$NS@ywf{5p*O@Th%#O3!dT~$jk{?mt
znoH&fx|Yn1VD;I|=N|uX-L`$TS8T$URUCip-t^U1_s;RBON!hT4j*~=L^Y!7lU3FA
ztv@Co*%jaNrP9<ls$Pr5I)rih#D3SnJO3FbA7#kC7-x51+SIM#mB1hStMfgMz27F5
zVD_J(y5rP;hR64NtHfLOBrf@J*<Gq{s<vqGric`YNorpb|GKj^n6JRnS^;%10<VPb
z!d~0(2YY%|&GG{GYL~8lrMV<y`>#0@w{)iGs~%RGBgV0`Lw;(dP5l<tmeafRd9(a>
z9t*wF^`&3B{zU5|{oiac{~2B^TPOXk=0C%v^1}ZNiIVnTI9@Q#_`6`sjT71ZystL8
z{_&q`@#o{8*B5JaAN`d+W|g?_;=7uo&f6m9<bvH@B_DNrlE$;nMN>r|7lcl1b7sEL
z^<jR?1=rM%vyV=j75=9w*>ehuy~2@G0{qL*uy4y?zNfLy{n&j$lYSO=o^Ws7YYPki
zEjo24^+vj+U9(N~H+HripFeK<?l(=-#`QsNwAaR|4bjtd>fKz~KQG{qTeQM)ZRU0>
zrPqORF1IpU&VTFqvykDW`eg~rFGnk9*JSU!l9v|G{X;+W+S;VtwFR@M$R7;c_~XIv
z0QR*PW7wPiX}!Fsb9KJk&J8;+m6ovlo%VR{4M_zV<_VSunY%VW`p?iQCdd1cZ~D^v
zw|4e@$;+N^z}i}#Fv)Oo-xkK(fpPBY$L2b{u|3!Rz&1YcvdH`|`)4{cUiq`)Kwasz
z3h75?{f}3qMjNVb{laV<<=cCp<POL0!#3&L3yd_FH!RxuUi`;p_YZ#4^h+b7jf6HF
z6gV?!;uXdh){_tXajpFRNAGS-bbW?=`>h3+3U|M)zMXgfefsI!{l`Bp;7?k#^FIT}
zkJBC>*G<X)vNGb0by}Nar&7L~{E0r^;#=E~?v;1?n74l6tFTXpcIECmlgG^V*)v2@
zwCbH*`ue1*0QLnJ<Lpa6ZVuVp6L;s2{ndq+JSBEdo$1E!e5Sho<NITK^$*`uy!^*^
zVf1UMlBH|Yg6ay}oGi2sZ)9sQ%~-Fw_s5m(R!hFciF`P=@DablBhLk=mS0JXczoy4
zx*y4p{?6OHep&OH7uRQrt<=0N6Rxf}d()Fvg?Tq+eqCT+axrq(x4p}LWP8jGxthl!
zwz1UKTliu~x%#C&C;sGIdEU8x%YO!jtm{@DCx0iVF|1!NI92}pRfej+qGx{?AN%Fr
zXR>Y2W!bBb6w|j)VfZL~+2R$yh4T6bt|e2zWnySY{ndY_aci9at^c@y6O>m#So{1x
zv;Pby6|dL7z5k!VOL@Hmdef=Fil3?eq|Jxxf1B3Ue+jzo81tW@(f-N2itK*}!}kBu
z%wDj7X{WyYk57jW%#z$aUpU@(%9btc&!+xoSkz}!zf$z-Oyx*1qgS$xT1!4Tyl(u_
zb?nL>)rGHqeVe~F{^iHBD<aZQOx;~BDa*&NY|F3IV5#4FPx>S8?)w$XkBVPs54n0E
zIVGZ{mHqCiZ#f0*ANs5&_j};T`yl^lt(pH?`gPbUFy+7tYY&n^Gi6wr`UV`lxGIa-
z(TW}MJ>lPHr62km_1=H?yh~4|WG6?TNG?5df=@!gou%u-){mys8b93jzHWN@(#Bpp
z_DP=7#iyPb6d0d2G-13NB*3B2;P{HSGQVZPNB_RBM?R))tgK&>;eBrIlG}G|;)=di
zmQCCtx997BhVu1`yw|=odtWj+t1NKuthSxuI|35f7uMAU6yIymy#K@cu+=i(@9kes
z-uTZTeQRO<dC7G}pC9aR$T#w9+OYATMxLrq9plDd;ektjEOxw}TCrtr+OcmTk||B4
zWz5$Zp38l$d-v_S|JLf&w`b(8y^#8J<{_=Wo(}@pr^oy|Z^u1jvdFHrJKsKy(LDb5
zOWW%I42K#wHvhA!311ietL~$py@K{7ll`JUs-t(9{fgVZ{GK11Pe6>enQZ}I;rsBV
zSJo)*Z#xuyJL7#C>!R(IA)X47CqvJ9Joxg(gz>5Uhoz<!{}~?Neq^^(e7XK#6_NTw
zb3X>vulliiox=Mgd;T*#NsL=vzbf-rl=3U_Yuh@NbKG{!Oy;@Qtder&x_Q{QuuCy_
z&x;7S8GSnSFw}jedZUA!=YIyS>Z83!_KR*k9`ED*pW*n6{|v3#Yd!xn$jb|ro^=1i
zzx=V{T6^j2a_^7A4<BXjnseyTugsqA+=80btw&Oew>&(qE?aed(fus3s?^1I<L@5K
z&YNEq`GjNfD}NEjC;u5fEYyAepP~P~MD}#K{TA})O-<xCA8%fqdpS1iJNwGddIIVH
zemchWF8MLp<$8CodibV&DNnE6D=k`lrJ+dc>D#Z%W@mj5za9{Ed-=U-l}t*1Js&k#
zWcxS8F>FzoD{$`cp6f>2U+&<)v`d<wxgyv{;pUHG@6LAj-&$h-8TNIYP5g2A=&!hi
zA3h&3my^5xcG<4&ThjScOKeV@m-(|?`E_(1%g$Nv4@j|Iz4FP|W>$^9%BG!ZMFB#d
zHk02wNWbWmV(ASHDG+sISRSUjzo*8yd_}L-{UcH-+grDF1ih5({K6pTzcusbE3v2^
zf8E}@d#_|XGpkVH6KxMwk>mKXfLkO?Z{ef=47@MH_a|R5eQS2@`JUidr47HDD*j|A
zX(rE7nWi48Tp$_i=gfSe`CfyCeDD5G9;Nqxm;YzD)bV-&>bOTS>OjeLv~d!K7oaQ-
z!sLw~u4;cTzcw07LVC@j&-Us6IGp-WU)X!$>BV>No|RI5-+G<B=y~I{jr$+g->Sc`
zc2!;S9}VUkFV<UqSbr>^_s8^LUn7^Dm-Cn8PF#9HI(Gjq3Aug8MPEQq9f<gKzN;jj
zKlaA0SKANW%l@vJv&<~turaWIicHbz>$2HN4O|-Q{EzRm+V(+gdUkDTs_CUWzE4^^
zn<kV^o>w=8@mioq?_SZpZkZX_HX$;8_|LFLG;Ou>zYU?YzzrEtSag9~xc>Hc<)fe-
zRxBMr8}A3t|4f-LTYvp<{fp--YGN}BKP-P_y1)C&jTtYhX3u!P!12(=r){?Xe*JE;
z3c1g2|76LJ*Z=lO|7UQwt-9V}qOC9VBD~7=SKjuWzDxR6Rkg`Ic+kI;q2fQovE93?
z|HdB(pSEeuw$G<Nxhu_+^mcL-E&tZMcZXG8<Dw%^mG|!|t9Ra<A^Gjh%s;G^b}QvJ
zFRtkK_%Ll<Ij_{8zjOBnxjP^Kt)BPr-^cZZKW;y?zAso2cJH@u@6)}Po*HvpNS$S5
zm%DFCqW^;o=KUAH%(mtB*m_w<DPrx~#T;Gi@j<noHk0qn`Z&MOY~zpWBXP1H%jPYT
z{%9B(Y4O=CH&y+5?}1-Ed{?hlL_fA~{pu?uX`1U~YaQsq^hk1QgO1fyg@oppSHguK
z$@>2jH$Nh)?Rhb2>K9$pKc~BwIW25dE-t>T=(NISk}pI2k4^hOOsEO}(B8IA;&0=T
zq7yTZz5OI`zwgG!AJ@0LiWIm^bm^AKSQsdhpdrg3+X@Ptweo?Ru6*&A$t;+;<kDQ%
zLydD+t>5eUpCMhf@mJT&E$@5kIQLz;^eHzsS3WOg`%LLG&UX@+cYip`P%uyZqHan5
zE&aonZ@Fz;`&;!)*J9=~r&r5Vzh!^r>-e!hy0K34h1smRTdr4ywrCcrE^!dnHa4`8
z<8xZ^D{V7>SNt(~*+2HHRrhSTAsMEvZ(0zisyppZbor;ZvHeHS+WmI=7}ore>Cnw}
z%9l@ceH7os<e`2+;M~r)M;W~AY%?A@_48#rIvik9IcEQU)w6x#*#`TM+4^qjwR?5k
z{c2*F*r#O|`WSLP9(Z17J8Q4f?Z@nm>r6hDm7aL`v*^NU=}wVfxeQPK8i&})R~(P~
zxWCgh`y)s5p$pHfeLp5Xsp)_8{(wr~?S=*X9yn@dM)$z5R|{Tg-Nop+h1mM4YQD+@
zch;G?kKO;ISke9O;B@<6nyU})llW16{MP!`r#n6z@6_G$nz7{7g%ciXzN@D_l-?-&
zJABWN+y7kj1?z7ZJ^#<(YFcoA^YcHGclZD1IQgGpfxCl5)%hkn1J9)~Y#(#Xvo}2B
zuDoSFp^AB9y^?G8AJ)AWmVJ2pb=}<MdzUR`>Hc<YlJ8ry;;ow-8xQmDc>HmF^m_gu
zQOTG8nN(c%4HArfR&jYbw>d){uY=6l`MqyS&iB+QUjI7R>~r8W$tBx2M+lft65Ymp
zQo=f1zH{B-kRMkc-c9z|d3pUF{WWKE3nlwpGA2)`e-K;UeB-M2?86`CKl-<LWtP~d
z`TVbM)^BlZ-LqZ2@bMM1cUBJ@<)+@s`p+QoM|ydt(cRlw7w?@Dn34N1?9SUh#uGo?
zSDG*$`lGRB-$(gF_eClqXL<N}Y|EU|urFzH|FWki|1(rvu@m`tZ0m>h9&+*@&0F<Z
zmk3#_-eF$%Oy%?U_{moc9y9IEWe@*&Z}Eq9zoX_VO5JHVo5->KSh9r8`l177cB@18
zNPe8|c;$=6<ReurVOD!8y<4Ukt$40J^|7pV>A4Js3t!K-*V%oXJ8f&O&V?UscOBX^
zJ6q@Qd}Gg^BYa_Ue`3t{W41=R*^k<rznnZNa5uNhM@Dv8jQnNO-bW|5$cz37{P3zm
zNF@8n+_-not|?0uymFRfzsuN}UtpB&{<!t%)8$9>xno{l+_HVcf=L`#QZ}#I$t?TH
z-HYLSg+kl4d)CzzlQfb7I;D8DRy<&^=KAoTK|I?1Vf-!j!?XJ4ERug4-kT?A!E^4J
znZ<V&`4wl&Zp9z7XTATjv$Sy4+Ma{Ox?5h{zPP)O{p^o$&ZRNlAHyHpuG^i-vT|WY
zW0}|M-lLwW{}~#^EsuY9W$<1T?f&TgEiYO9z(C!;nOvI=-C3QasK)U<UGgil=(}pi
z*c}qb4C9nP%H8_UaNs|K@R$D#9cJ6s7cM^`rhQ=kp|YPT@;>VfYF*3PAB#7(&HVQM
z?NWQannvNDJO91AQ~KBT1#|m{ubV2ozxD08_~n7phAW9_2J7#}9Dnw~I&sS53o^mU
zm)4fX-|7z#FVtC?D;1+<a6)f`o5=iymcJT<{}^xC|8e<|x&DttpKiW=<Kd#s;s2Dk
zC>&pRg+=&P-B!D){~6R>*Zya?KI=cjiy0Zr*S2;j?YgM{V6K?QlI;^?B~k=Jo`g@S
zVQ<{Z-)42R+vC)xduIg_uR1$RJpO#P>fYl23<{_B|9bSF;TPw6g&!jN{D*l<GLN!;
zzpeJ$vT&+UrQzH2+X@{d;zY0gasQC1UotoQu()_s$>f=>Z?_fBVLg!};3iVwp*-tV
z;>Y;I^_^Akrt03l6P0ys5_86O$=2>X7T!}{^BfMS?>rtA&r*}TY1XxBeyLc!*=uf}
zIdLyVRj_?I^V7r=3fE;nCI^1l-d)P8b@`CM@yhkyh3&J>3GJTJ$1KBp+`{rXgPbcv
z=AV+QkK?5(vVHb<Wq#Qa5_>*hJaW@hk6#Zt<ljr(>3@8zo5xe8PU>2a31euO2&2Ob
zhPysLb}q^)<9~Qx^2U+8cO`EATN{jbI`T;TXV^U9z<-9l^N#I`wSUatq`&3K?#Yqf
zVcOH1N)nai9y7@NlU%@W@<DGu>-pnudoFIdy_?rOtoKreYJUcwz)lO9qLcG~2(Dka
z`mBxc<J#=wa;8_(H}<$bv<|%~eUeWu(tzRl=bSQ$^HE=Rfh&BiBlfTUGf1ruQ~b{m
z{^J76lt9QvX!JRz>kGo?|H=K&Fv)q{e}>!p|4ngskY#T~>b7Ma@BHJt?1#70{IIKP
zpB{81hSa{4-=qN^5n8ZHj(_jg4L@=p&RnmTxV_`$w9I|sT)StPFr<7}X^i@Hu6^l`
zwj+^Q-wf_Wvq!QNt#e2y;`q;C%PGstP#NFwCARo@omTfF`Oc{5=#0&OCnofTXvsga
zfAgQ=h5O`B59CvpgoZeDEDUvHU}oTo0{aWpkio#~s*l^L<+E2<{f_evEXnnW->JX3
zAXAjz!s6lPOFMe`zcqd6Te{U$WzVIjwmutwPW;bs-*fWM6$|(yAp4NQF4ed$&c0YV
z%Wq|R)sB;&)WWpCGEe+*b*B9_^J@*#q3_?nE!oZae*JO}-2|skUe6gFR*OEmFK4Q~
z`u2SOA5Q0-mIuzUS^WFR29JZ^PA320Vwm_(@5lTj*DpIh@aLL+x?*<buHEdJzaFfJ
zUzl^H_)oyc{|v2>>L2TyA4M}AFu68Uy7=F<=P|!!()K1hUDVlQz3A4<s2V5h>Jlyk
z%jCQLE7d1uU41%t-@<g$xpy>8_q<4+WG>0GB%#?sWRLA5UhAV$CQ|FB+t1F`UvXHO
zZ`I+A^3S`T)d)X6y<W6p{*hTe-vSQGO}6Zmm6)t>@Ac$)^UJ?3et7-x%Rh{NmvUVF
zcRRD})yE@=H=hblob%37MS!3AUDv1eTopV2=sw;rbv%99*_E%h|LS(l%oM9DQk^o%
z&w`<T-V%mddDn`4OFt?<3U7Vv>waV&|HghVJ!TXBX)#|;bIRyHVmEztt891T${$s{
zWs*%dPUnB?eShreF~3^Fb9GY~m%3JbsTZ+Hf3$mc+XwxYM>{t(o~m8;^@GfRhWi<e
zFOPXX3Atr{)L!65#kTW@_gQ@S6IF73%}SncPu|`+&i%)SL%pZ%QG>1ck$p;=KCEjl
zT$7~~oXr^UNR{WlF{=bi-;>a}@*&%=eW~^hx?r(=cV}vkhrw-+6DQUy{kXrjVoGMz
zv`M?9<}!3hZj~#NE}Bz%{N90=_qO~Kdi_s(*Tzd*PQT~Z-PbAF@hSi3nHkLaHN{^R
z@Q1Hcf0W;+SHr$ADobozVXVz}C8kLm1K6(ReEj#e?CRtDTf+ZzR;B-KVgJ`%#n7eS
zd0#NSRjvEsoAokXl9E9|5^|0*!T%W^ru^~z&#<V}{_jWqAK}Y>lx4KHl`Kg0|M9u~
zNBeiy%%2y98@1~1J2gmV>THg5)VcUGXiwI8`R@^Z?hk{*c|TeoH?L#4v_@YyH(`gl
zxt%v>C!^b?;^UV#sQ+kvJYT?D_juzUb>4H6dcv$vKfC?>!GldqFONU}QL=zPV$q)G
z55FJTJzqFme4hOEl7q`$m#vCi9Mf9B!QOmAS>dat%(dx9_&cW6v2EMl5pDWyWm#cv
zT06_<-ZPa4&NKH+zPyt`cB$y&=f~qkO@7#KHT7q{HCNZ|rx2S`s^4?DWWmN)Q#U`b
z?|l2XUM#DsMr`Bp<+pElE|@-1Ic3FRp2y1S%a<}_L|5GUmEZNfPB7O}>=cJfclor_
z^19M;b0hi>?XG(8c29Yw{n6j|*`zI4*9dUBJed1r#>I~>ulA<@*!^hzA0;{4Znr&K
zdbeCo%aaQ|Ic3}RTU}N~rxt%)!0)qYPu=_D_M-dF{93!e@omy&rl0d-&cqeWd!@oV
zr+#DX`Uac)?8p20>oX2>Zr{=6ul0!O_BJ!d6SfE0`;U|@;9J-AxnI1-Z^z|7j{g}r
z&bO9KyH<L@SgWaKN9}A`f1ghb=VLxb6#w!2c)eA<Enm9Ai=*UbjN-hbn>XA2zU%o-
zB@J{O!20Cc+?5fIwdqD@a|<OVX^Jn_lw?~Iz;dA^Yu3W%#UWQ1h?vBc-Y;YSWa4Z4
z_m2M=7Eju<>OX_a$8BpL{8c_y)w)IK<u089=~yXO9hGga(n}NXTd+KzyJQ}N<k$B-
zVdAsqANm`0*?-HtcTaE0PPlz6rR>NFKM^K>hFLH6=;wX-VeYzZXX_-F%MbcImc)oE
znX-D=^5+CFKCBnKQL+B;wOz+%{cI|I%73r7JtV)N{`kW6np-{`?eF~0kRdO6bk)Ln
zhi++C&bl&rQ>4v`L(^psoGyHob+>Qg>OXov`d%-2x&8V*!E4ba6QdkH*LL}QJ=EM_
z{A2yfs9PV-&0G=vC-395g{Hk{%EHCIJ(6zkY$+1x;0RM@m$0?fVCFtPPySMk_T%$R
zS+jUsq;kGXEs8p$8{oFd?aV62z+(kxjNhO2UVl@+|DOdvZ&uISMH$na-oAAf*z9q;
zGN=B!>mQN6XWQ&FUe0P;xc-Njx3=``8E?0)SUPuM=Ny;D$vQ{)rxaA0Mjx{>+x>8V
zhuJ^d58vkeS`rjpJ3VM)_up)FiB#EN`ph;f-uN*n{h0mXyVIr*wdZy(jBs3%wacq}
zhMvv@m%1QM_B`Wb1yxV@t4@Agw%GK3OAY%+<utt<wbu7$JZY8&@*nQ(kbi%*U--x3
zBldz@rhPcyUZc7C<;$v2ef_N-HdEScnjD@yxA^tgLgwSWtT^6n^B*p`C)ob1x8!54
zuz7jt&xrpF2O3NmpYNB@N%=UvV{M7HC%4}0#WAnWZg^fTxXtkAivJAzR>ezZf38^n
z(7d(eb*6FkqsX;u=k(q7Fp^(TD!F<-+m^j~k*2xXM`o3tU3%Fuce%6d6<ukGhdoRM
z2ZSUZJYQBEDbM#ub!~KwW3;Z{p=Y|%hkYym&NkU-v}9TM0WFWiH9Kue9yOTRlxFkx
zADQ1KHtppm4%Jy2*Dr>+G5l#Qo@ek;t-bzZ*SqsaPnH%hdU0XZt((rm9+P}dt%<!-
z`uj-rgX-s-Kfar}BCf{!1MA(jy=T5%+b%r2b5Z9^MfTGN%b2<6$nBdNeq@@|{boC5
z-^af49<#$QZ<Q`>4&1s;a;9Vf^Q!L;-Cxg1kz)A#pFyl*`oVA4tMc|fh&6lfckRtO
z(Jtqt3$8ZtN7k^*FISkKQ~K-bmUw}EW(OB!x!!lVWb&~pms2rq;<g9d>|ZbX$K_re
zC2nW*F}zn_XUl!Aj9JIp+2^dUUim6)wfuC6S1Z=;FtnfV*F0gL$tBbKynh_Geth5l
z<)*2}y!&rvP3^IN9cgXUw|vo;cZ)Y#Fs%O={_uSJq7QltS7l1&Zkt(~%sDY-v!lR7
zD=UK-Ca%fX=lnD6e-!KX`JX6z!yfB&>*%=^Q8!O(&2vc&5&SVN<~Nh|)qd%U!$<5a
zKlJ{a`0ze^_O_QQ(d(A)-KJ5-=jFCo^=1ciUqMy5l-2$w)7a_<{~1~zZR}mU^{<?C
z^$evsIk~TzHOgx*UtV7-d6c1~T%NN=TEG3=RJQJ1W{z*iD?0y8=<Sm~^ZUs2d4Aew
z_wcWet4RJ<|50x>Ps@GhdM%Z?9uxm8x%<B9<{#~~`5kroSH65+dgBLw_?GLb{yOK2
zCA}jeKBU~MYP`aK<(mAV`#kGA_Hn)b#WGp{m+zO4OKzOFw^}mnS6)B{v-DxB^M_8$
z^E$GvUmTki)$_4^@9rI!Z-rMzq;~Cm!ZW$a@b$G_^-Wc~>N6_Z9X4ifzo@%3y4U2c
zVwlH`xhKyrJpQWs$}aK#ulKn!uHEAA*dxB)zdl*&_Hnzp`OjSoRv9cWbGKl&IREg{
zuKkRjk50e%DjR*2ajnq1gq)INp=Z}P+gCJjCQrP6d0U}_h#ks&#Jc7K^FR69nEq!-
zSPfdddmgn>8x!BQPpo2f;D>b+tMflvM{aI<bV@RLTaWiP(G(ViBL@<g`<j<KI>^0r
zt@zLIM~h#g{$k{Hd(Hhn{>pz?VUzfu!8JYR&$9mvkH0X!5ee`A&v0nL$M(OXbBjOC
z{?E{V@jnAs^D+4^VWsC!<^QvHkUNOJ9-{w8h@8QHhDDp#)dkmo{PoaIxI#GYqkC)K
z`jY)JSy3XKRUb{auBcsiP~g3FO5h|3zWIkN7w|88v0nZ^L#yLs{$JWw&!3wAXOLg~
zPn5mK{>7Tt^QYYZQ`cbI)b;T{1FGTn4+AO=|LZ!p^QSLp5S#gi1|d_d{xfvSS>`ij
zm{qOVD85qn)oq))v_m3`w>chs{zP=<!}Gm$JRjLkJlSuRdb;pwJa4nz`BVQ=YPC<<
z>HH9m`>?+?H^0<hCM)U<f7L|Ytru!n9F=)5y(DphNZ<S;mFq1p*oZ%}7qN(y_Sm`0
zsUVemo8i~8L;dF(?Cy0%Jnu~Ha-C!@8SXs6eT8Yv-^X9q%lxSSP=2&dZmM41o>;qC
zGw)ohs;+F@fA5}I`ah<tZxlos#B|(Q`;2OTEMQ&KrNMmIl>etyH~+6^_FrbMzVrHd
zyiDY)@BbOLXhppUUzxh{q)??e|3!|5$@BOV-2J{@{?Bko@?-nI_G<axFZchr%8>T&
z!nd+OUcO%}Ay?;xJ6!CsUck`vcye=3ZT8J&R}wF6vETmc^3pv8PI8ajEw~ELzrMAv
zX4B?fAGRNzYiXzKuD80l;N&;!%SCZ2svFjB>2t_CF7dkgXleZxyX87kdfiLg3iB%q
zKKTf*<~a1A{_)S_D?}J`|C!#qzh~2r@&muNtzLfQi0Oqp3m)GLZ205<FgCsMpK_+0
zWnaeHNzuMecha&gKkjrZoKW5K@kZZ+lyLFoe{w(ko4)+^jmV1l$hKQ|SqhJBRN!L&
z!?n}!(d*do$9~Q0UsU;iZNH`3ck63f52Iq)jfls}?>78r*yp%_!%nT@S#I%>e{xg5
zaKAQQdUt2&44FHBKK?z&{@wQ9hxN%9_SF44-uksS@3G_F%g1I*pGes8a4A>oj}vk;
z))_xe|I_=A=|98z>;D-pPPo4G$K`)6{sR9Qprd3Ah2r)yTe9=HBVNr<nEQUccSp&W
z31Wvf#-6$HK*-|3<d+ARzn(9&X6--u7yp#5{V{y@cUO5&q?faV*K~%b(|5!>UfX1!
z{-1&O=JIy+k1Q9KOuuyQ!eM2e8H)pq#4qbaTYOvo?_&KY%}o2RfB!RVxz%8s_v+WR
z!+w)~y-V4?wawnhQ|5tE<E&3N?M&~+pVh8NKNz)cW2$iCx+zA@{i$xp9~Hk1t<T^M
zs*Y!S8CBwQTK3J)>GDTbENHd6sql|!_R^Zn58t|%M!no-Cvh!l+qNFJu8j&0%I(dA
zugrVaVC~QR$MaH6UfR|!&77^)g)?)W|2~k;vhp3{gAW{E7JL6_W&O`^MEgGj`<={B
zw_<N=8+m<V+RWHwV0bL#-5=hHzu$Y-miBko$$u2<ToQl7Bdt&RzL|l|)wEmwr~IuY
z`f6EA<Ha84bN%>y*v@KukaDE!-kX6}c>b+vJ^qI!fPLfLyUxj}ws#M|Y2&yRDSa^{
zW&Vk9^9S})`h6vtUp>CO%vt+*#-VvRjolL%RT4^`@SoeT&m{Xl!_E7AsZCeE`>)y+
zb$IinQ}>MpUvc%+A7~B!Xu4z{;}2H7`B`N#$8%+bto}39g)=$N{~?_GVF7#Ae#u0u
zEw}PpYiw&DX)dss=AF}V{KBe@%J&(ngu`vrKjs~ezjmy3)ywiIxpl`m^Gk|k{xg)T
zX!5>sI9m|M@MG!13j0H5vr_8huBObNsi)qhdSsn{%=3vojBm8nABFdqyq9!d9`mwF
zZDsoE8|UP#S1K_-dGc6c@`T$SU!GrUu-$wt&a(O7^;xESt_8)+^($uDWD&2^v{+qv
z{@!2VmiEW8CI2MnKKv`}H9yFGmv`lQ(Qn6Q&(KU|kUfy{slejPvz_*R!KYVe)N$<y
z+?2iH9J}wD*Mk4j_Rc?ii*5eHxnlRdFE8*|*vg=y$J5&vxz}?6ub0N626_Hp2g9;w
ze)Maue`G7V<*#|n&Wm|cu7%~fyDOMi{EYOsk9t^7a?vVNXP0SJ-eu}59(_4a_u9E{
z*{Qp>+a+yq*OHvxWFfmbu6tp{c88B9acRA)qwW<>>iAJA_oCqNN70XcyO-W`e#{&0
z{W<;0dZm4{*k)&D>dD45JxQ4}$@t3GSJ_)W>_2EPoptGG$%kGk181G<enF03Tz@wH
zdemUI4!k}yqE7t7v!xHu>O^P7sqDM9<N@2xP{z9U>3?oqdx|k$4mw){gabBS__EgM
z)uUI3eYb4evD0#1L}ExLU-2}B;*;vL_PdHO&a8iURycHT-{*i!IeF_AF}Ryt(B9Qh
zWZ-e);Il0+7ByJjbw5xia&_IUSHB%L%zD@t<*&h({L15)|Fx}K?rQsAeCxVyc5;3?
zgXO0OJqqX7vDa7abY)Q1NYy$Z;KpJ5dgkPZ*PBy!nd?7XYjVQR{~be6{oR+p!u?$t
z(zdsMN#C<|Pp;O9JGV5vD{ETsa`imWJbsGJ-#_YNYPeJ1!l`W>$_iak8q6~$KRn-@
zy3?E=Y>J-(qvwBylEPo%?yP&vHmHbwIL>3(yHX}gG^C2bLR1y8j2gDw7Dg{#vAT3_
zDfgWyU!I~Z`YJiyN?)Da80G!8J|nMTKt%gO^mvWO$HNuOj+?K#cI@@c54+{@=WL6n
zdroun`4id`&>95mgdd%6G=0zgBkbDukdMcft~>C3rO@qI_FFxH=RWRolVtRG`Nw@0
z!`Jh?HK`x+JI!4GF8n*U<W8mdjsE%R=Pv(zerxyB)KZ=$MiVDEo>1DS{NZ_HjrXJN
z@;83D9{DKTY|T57|A%x!o#feC&JXt==6BktTs&L<=ypqo%v`JU&s%;AD=5EiWxsHN
zeYxDSNmIU>fEST4${rE88||H6DCEtuBFN)TYOKdWmU5QRT653KTe6dLmqpyZc_3nE
z-#vq_hzG)r_ZxqPp3-1gbLGRgUb%I*wp4}A%JT6v&t~fMbK^hT^X1W&6$`k&UlXgE
zyZF}qTZc>Y{xei*F6LYQ!6r;}{^9o=5wUBpZf`I7GSe@#M{(Qy0Dk{NyP`B0*6;=&
z_$PjCo$tBD3nR`K9M{$CV9Q@AGymB37YlgqDj&XO*C_11<mI$!UiYg{bZF+Ud{r;?
zX?^GZPmzDv|1-oc`p<Cj(B63~V>5oGzI`LUy{dHS(Yrg@IG;AgK2ms+n`C79yo=%V
zl^V{6u~Bc=z2CykxlZEg5`%^_wVtx|Qa3kl*nC-6=+dEU($OWqH#$qW$~UYQ31C+3
zvV2srtXHM?*fsY6XMQLCsAu~h##9);wa@s^(30(Yf6n6*()I7>&0kt762Pp~Ws&5u
z$hFUeF;pNy;|tR&weF?ISD#$1HaBZmPxsb$N4!)M8w{c)Z5Zsi_PR1CYwZ$Akc>!Z
zzR<ONS;nut?!0TiVq3RP(ffVNx}qg3njxj}K0{q-Sr^06tA7+9-mObCTfOLJtdGU#
zl#cx4%iir+{^Fn5g&MmvxBMoir@b+_<SM}DzA$gcpX|6U1`ZYhH^;XzXOAD=&mJ2q
zJ#&xwa!JpVN&*Y!7g(5H4s;N>bt^jJw(8cUd%`}r8BBC(6m{Pq!!-+f;QMNU{|u}D
zWbKV;{~Q0<K?s(=&_|!+8sq9eZvW@WxBEXs-hYN)fwBx0_u5zAtoRixb^2)3)p=7N
z_Oe}*2s`_ZY44SeM$hAMh1WMcDDqstc#CYk%k{M}FUqfdFNmHfT*<CAr@&t8=kmi=
zdi@W%-)inDUFcl7=ylnJJ5gB<`xNvVw{7EK;k}_O`OZlDr9a<~YNro>XX`bl=I7=X
zc5*g6O?ljQ&aYwt#~<IvyzB?Reehp*zjl)4jV10UE6oLr+)pf*?=32-DgD4zdpD@k
z{_d8^zZzq&EnW07>P+GBRhiq$PfFhDE@e6;@~h#%gR2pL)E}Oxzgki6<^MhBvHYb5
zQ{Cs@t25RqsT|j0531!jvEKChKg){j!?)f(>n}`PeYQMXvG>o6WG#;`st5mSUk(iw
z(P~)51{w!vYVc%Ws(MhmfZzT@{n1|gV^uFMz4=n>ciYfpUg;?fAD+U4HnU#uab9(H
zQHJ=ese4;)wOo6-GdJ<vPd|qP2kiE>ZF#YPAJnbb{g`wA<NFW8Ho2!2>)C(t|GM7g
zL*K@guXnM8-`dx&H@V{6scV~ryc#NO8n1*s6=6J5aX;$gy_qYbogeD+N4+Y0FZMEJ
z?#GRvDt^DdwevM^{$t-`3v={s^cP1*&J6K6tu;;XNuKie<8#utRvi^#2%mU<Sxw%&
zNAb$B7j+-$m8FVI*!lQhq}<ilQv%q<AJpm}pH+MG@sHCn8?Q`q4b0oNLj1JCoXH)R
zR~%pOb>-u<{xz>>No=|Bb^0ZJo2T_#R;avXcv|uG#Glm*7B$$F|LA-mFP`y_<;qsu
z=;PaW&0S*`J~QRjLzOct7F!lssL0J-eX_>#!Fhono9gvlFFoHGP;lvSPUhF*2D_PO
zYuB&LoMv^jWtvs*n((kmqWV0mr{=DaNGX59{3GOpBkvkc);*pg8U_=OEev&Ez`T^9
zqP)dEz2nW(uJ1>xw)7ea9J+U~i`DK^seOZd)VGY>#hY7o7v6t*jA{K$W$T2h%|iZ<
zvi?Gkb=>+eULq!QznG2m>hqs&yIq)ceEGZQckVCd`X2SDPNPElpKw1<?YujyvclG+
zZcTe2+#{mCA@NtEgX~t(hyTQXoK$bi4L<zO@^b##_iL`W>9JJmEpFT+#8>#o+46PR
zAJ>n^uKl~Z<vd%&{kT`#6^@Iv$*`#JTCY0$LIVF&2igA&?KS0#D;7swDLHF$$K+O}
zwy{lxpTv50n@_xz`wwY*r2JU^c=mROw6<q1Zp!c4W`6Gf{Ukj0!}J4}e;6O0v~OYm
z+vx8?cNmJFHhvGbJ#kXy*W{P8p4svLXE^Eh@jt`gI{Ck{x(wy@!c*fP&hKDmaK9<>
z`4xxKhJTW4TkbRK1c#+cRY%`hd$L?>U-)c=oQWqS*52=&|H;?J@jpXcZj4;#()jH7
z&0IG!Pn<7TG1{c`wP3r?QTLUK9>@1GL|m*>&I}Vv_xw@&U~hErrpDXPdW>9m%$dA%
zyIlRPqlN!0ulxyIU2*^5cRQtvN|%&=Cv|(UEONKs(_fgx|9i)S^0n*R=Zo7IKb+5Y
z>&33Md+#oNI_>MkAl^ex5p#aaRi3j7xBkd{^!h%9kL+!K{LLnor#`$Ly+Ai*rlHD>
z)bgC_2E*IU=VKVE_8&X{Q&npF?+f)Wo~)hU9{<VD#`Zr$oM8P6)7Fo2`9Hktc<#q#
zd^~Lx<0^W@OW=KqkhXCD0`(7T?zhYrELj`vd+Cp+?A9k$y>sVXPj~%v?hJGDZo9dz
z3{_hzzP}ay&!7}H^FKo(<9~)l_x85!ss7gWpJAfD&wqvu4gVP~<~s<yjAz)RUAy?k
zJ?|CSN}g{YPi;CY#+b_T_<OD8T5bCVJJC&hx9{!`xaKp#W0OYaqJvsR#=rN?2@9M0
z@jt`iFe}@sb7rNjInTPM&@iN*WmQAr=ij0Ge`L(({?BkhKjJ^b{=)i?C5-%U#s4!X
zEt~kCA^!M(28XP#XYDS@+WT(Zyv<>PqtJ1w#sdKc+Q<Ic-Cg>hq0z>^Qfkxj!lF$p
z@)nr5^!)xDcWwTw>n}8zk1b)e&+^#TlfU`r#{1vd|FAy|eH41lK1=f22X<dOhTMxg
z89Ju)U%k0+Yf}DCD+~MAXZ|z1Xj}ZquX9aB^6_o)f+p7HdwNB5zi4n;aBrC0|4{Q-
z;X%m-{ORlYe?+F{FaMMNaq7}ltxbwri`Ki!6+Ec5suNwQe|)dfrxVVLGJegyx6$A>
z@5+wk`kg_Z@{Z@XCe(;;y`u9^{l_%jh~^c4XY4)6Zoel}boTt1$qrJp>i^7DTl=3O
z4zx)@X@yN-ChzyRx({FE3D_jvRJrBm7T|UI*NT;2{xkfv@{Rtt=lXvJ7gx94{~6S+
zAO2_f_4z-;t>OUo&DXnYIP7BI_goc!>b>Pw^J{(oQ+t=~Nq*q|r`0C?KSTV){|qmJ
zF0c!KIDa%w>9YE@t+vL8ieGOJzs$B{r_)X6Nw+J4uWYN=dQ<vd^2hBXQmo%5mVP+*
z{nYN(MLOPdm-I>S&UHK}_h^-4W#xz9W4o5$Hu>G%&z-fYa-u)e^HY{8d=)27vIrS5
zC|}xI<NR^E|CN%RCigj_Ux`;;y)|QjHk-|&io1PF`VR1<HCV|vht1kI_W{d>YdP<9
z)wb=NsMPn6Yx}f<0}sBPHh&t|9AU?^@tVnh2CkdV>+jj#TNSeR@NVX<o#*cKJ2w81
zy0-x77<}}NXmJ?BU!ZX+<fA(t#6ONde5K6oBb%fj$8Xu=@xQ-ENtqw{&%pZn`^B4?
zx4sC)oGLpm)A3Q<uDPmbeip-P<aPYo^FA(b&)ehw@cG7#SMy9xADNie^h{it&-idc
z&q|h$Uu;(|T-0E5>e2N_U%$=!6*Bop*6lO#x?eS-;*Im2PVG35Q_X*9H$Qtt-1UR|
zB(_}Cm0c3SsISc<DKlqt@^;(ZJY07nS4r8RZ3RQ$2}Z-ME=yNt{L0%bVwhEC_I;wW
z#w?S02CvpJGH$3iC-JW|S5Vp2B{U?kVU+>H@<0dH5c`V1S$9EX1|w*0{KHx2e}0P&
z|J&LBS1s$i)uLN!lYXswwRTyg*`meVWw}?*SRCi?`SN(Z*IoxX)!!~|N?SfWTd?}~
z2|bm7!*{gn-)!Cgaq{8qcIqE;m#tpBa^7pd39&B9lKR5@voqdRg#F>I5?j3F<*p;#
zY|EG5iaWG+Z}QI8lz<z@91<!I+Ru$>x+fi7@%HVbJ_)6OiN~c{Ljn&l@G&glxBKw^
zSUzXgE*Il0sh+CR7mm+wpIKwJy>_{T^5y+2Rz<lVvGqSzFQmNT!vWrhyIt;9m#VNo
zxA}XiQqqE<^ueq*ru>W*!J)geSKsHq;cYFmWu8aS@hgud?W1m7{&JdE?P6}pYYSc7
z>k~x;TVj4aC||%Y|Kp>^rfYk|SDae=EjxRqlsmJ<VV4`f9{N9xUAFMiUH+r{`TqD9
zF1+`5l}5?$mWg|o_h(DYc>IiM*X_4<r-QedUH?}0y+C-f$VZ(n{|h~#)xOMb8#d27
zciwZuwyir@e0j`HbZRKG7&SH@m$|^s{XzMk=M?#`U(G+S$-kxl&sA6b-~RpIW{0o6
zk$KH3bj8-oYu3Jbdr(JdduOtPe{d^9NLAm&W!VBttme(ltQ9hQ>ODg&VEHnKgn|s_
z<{yV2daoC*5VAD+F)RA6?a@PS%{s5Q^DMs4BL9O=vqt~p>_fkn^)kQO)s=axOF>-V
z+#0rrS5_Y5P~RT)rGY_$nIYVEWrg^mT}Lm+#=j5~jxjv#mBcvX*CfOF#~Lh7%Xi%B
z@3B+9^|DS#>lg2)_WipjF}F#ow5m^>CsMAizTj->d_f!Q{|ub&jit7x!llQjKD+(v
zXXj@P`Iy{4*~Pw>w)~a55cP6f*tY`a`Fb3Ocz!)7|Gs((<LBv3rQH1;_cT6~%`NJ=
zc28!D(H*fD=XzUyec15zKSS2u?Z^Jf+i8ACTe)~4Q*_7v5`TX~kJR(qo{BC`T=-Hd
z<kH#YnPFx{Xa8+|`}UI2-=A7+^-}vC<cvSuev~(PQHEee@=+tRsVg&E4*vd<ux`Pe
zB6gON=j#|{v+j59)An7J^((LO!>@?<Tctbq*IKr030(fTKW6d;#`kfpYbyK?|5GTP
z9Cq=|yC;E-DjS&|ROP+yn(qGLUiZT<DVI!kd+YvZs5mST2JQngd|Kc4aN6sG3uhN|
zZ)e@uGw1l(YTuy!7r&ehU6||cw`Grk*^AyI-0IURo|ZE=A9(pP9J~q_bJgv_<VBfP
zsYj0<?VF#QZ+oU!=hJZker>~(I(Plm)?ajGa0Rc$McPX%XHlP_Yjako@JDgrlw(Id
z-Gd$fF8Ffe!*x07t4jSAx>Fy2KAQgTy#2@j43FObdYS*{+4_QM8qYf)%`%;2E|F3=
z;qZlK2Z=i2kMp`8`&zCy>2CTL$rO<iyUE4h;qb4X{;K=yS%1{BSed_Ne8j8o#aQy6
zK`+&C`CSLDIrRskwX`>Vv_B?m?RM?9%f&CJ*OrI4X?(afM{C2Yos6A{_Zc3)c95Is
zT4_7~*p;nU9&Y*=-)73oaQe_`vuE)uSwkM*N!x4mIb`GIFEdNMXV18FOk>Zp*_N?0
z&be9rPF~9pvuIC8P1Y4tO_j-q=doPf8m)1oe~V_~oN6rt`MD+Y*niAlR&nlKt;MbL
z%d<8xozE;hsURb}yyx)_1u2FXrRPOn?312$&Nl8xwC&?5+u6)=-xyB()ZD+v>escS
zHP=5TAKRQg>t4U`3-fKs-g~BJ`|#=6rTm`5|L*r!(KqwNK3<upa;en%=N9d|CK2o2
z>pgGQxaBX&eEI7FzC&G~JnU><PM;N16#1h3y49M?CV`n3wtZJ%C>LU9c+$Y2{?uf`
zpZscGC+T|83iqH3k7oSY?74Ht*IOr+G~So|6~MkOFml$kt@+6hXYVc3iH&wl`=0%D
z!;uv$A`*T#Ufc8U_2a(Y+=tb?0hymq*4FPodHIq<1Z&aknHvwN$Q1Z*7h!x=5-urz
zB$_*(A?nBKxW+FR3wP))^F9$_%&>Zbkw?rI<`2(yivGB{`p$v;HFfe?+od8-{thTR
zW?Rv-{`HRw>>Dn|Y;W3AySU=nG0!d6E8Zy{pPy~O`1ri@ahB@-OVf|-v%UC-H&N!-
z<UNzmPGsGpAU;{v--f|@@~;c*fvy!le_UM?eL#PGo!qMpnYDFSVmF9ipK;)Cz0{X}
zo*(nQKj!tytz2>@rLi+COrqjOX_#!kQiGYsdfSiTM`!bLM_S2EUKaa$*RBuWuFX6o
z|85ea<u``Md($)Rr4;|&Z1?`?-@d-a*5nAslN~C{{28`A4PxKN@pS=z@S;sNo{xOZ
z56x29wQaSbcf|*Wz-izOfAc>dt=#n|<J!6e^K15eU$c9SSM22bq5b+uO5O6;4s!cl
zi}rDRd^h>=Uc2aC8&94O0-JC9_I`Z6@Np9#zun!{+(-U1i0hivR(_EcciVG)Yeb)F
z+jYf&?=h`O9`73)U(aIrQl))L=bxGGww0{?U3&~yWXKgVrp=fZ_|92t?!L#Suf)r@
zZ4)!Pch_IWY{SAyo-q$Y-7U{D_`6oh%$HtvCGT1Pp?iE+O;;#dWL_@%&+vFvZ_l@L
z{9E%X%pV<_`e?1si|=9OQhw`<Sl6CB_sOa?htGW>c&D$%I{#z$ESnF;->fU`nI4}%
z|3qw`c;e&96VFFIH|6iS#(eZ*hLqlPyA^d8Hu#qGZ!=MCnzVyugUsgxk1x++`0`6*
zQG-aSXx=KmMQblG!uGj_Y}J+by_VFb>vxR%#KS4RQhtqBW0;r|KF%@x^Xjb#Lr1ri
zmQrh|1Q#2F$d*6{QP|c-?Y58WkLn5^-0r=6OL6?Gb&O(Vcf)PX<!-*Qw6$3GY+v;U
zwZ+R`-j}iQR_{#h;(k7>Wb3pi4Xcj%HOynwZm``Uo58$b^5e9fDf2V0)X8O)U9<H$
z^NvAoXGVkljP))b`g^|AOTA3{?m4r6n$ua9=A4Pb7Yx3?adZ5`n_n~O_#b`g{uK(>
zXa5L(xc7U@Kb5RmXM2w>sS<r<w04nkzR!fiJ-G)G9G)*+!0)wa$A11A=jND?%T_+j
z-10JK{Tg$bJ&i33BbeNoH?mYqy<V^PaKok@dv?@LaFq5|uF~q6{F94eR@Z|q{+$zF
z+p%4(2{EaBuyw=TZRS(NW@oTGc-*(3u5{9UX<faN*TRc4#DA?@T`D<sjc(s#t~M?6
zO)H-~wqRcPe4B%<-^#38Tl?CVo?n?2+@bS;;iO{FZAS$@2KAMq&-cmxI4=61p;PgK
zjpKs24D-tyJX+p(q}ei_S}AG6$Fb%=LvNh^wCk={_L!JNpH$wlz4z>|=BMiC`1s}D
zJYHYuAh&YqtFo$dGuaj|Kcl{7Zn>*M(C&7>7Ur0D-H%VlW*@iO-uigcEXyxX(ko{+
zh9A~=Q2I~nah*WLz3jEdZXZrw&|lnVAyc1WU>BUMdtCNc>AGUSkAAZs{AcKuGrjUv
ztzQ3h#p5o+=~GW1Zw-8X@Xre77h2lK-_@DDs>(aPHu!+l+IchHFzlQ-v%>DI1k3Nd
zKiOBd<i?$jzxcK^)-K-c(o^$eHx7PL=sP0Q{7!@EtLSq%l^?s)mt_}a%;M{>e8lUg
z<gzg;JxlOP3Y$vb4+*)QD@^2jrd_Xk5~bhq#riyd+!?2%-zIqpRY<Catn+v7slLFz
zJTRh8`(xbn2X95c`i8j)=5fjt_Uu_#rPUwDzg_fs{fAwq+qeI&iRarIe%WwGY<`XC
zzwf`+B!4`<m!X=qC`!CvxAeYLwwzSgsf=ZpnZ;+$5UnwYV~_iqesG@Ir9Fj9wtkzD
z;?}v%pJf4i=k9b_$2axAzRrAjUOaQw_WrMT`EI*;XP*{0mVH3nII-c4@s;P39c1^4
zKC?Q0^z>To$i0`ZADp%K)NP-SoR8UaX8W1*PhYrZ_5S{(gQD+kWLHIN%V_)jX0*E*
z#qiBaoBN;8YUzA-$49>5W%IMXy|iAkXHy<S>+!PZ66@{krM_x!$v&%mz-sHeX-oby
zEPdLbQ2sNp;XlK^70lOHtcyPwXLHG>c=^e~<>Flx9>TZIHhi3X;o}v#g8Heccm4!l
z+VY=4EW1i=A@gn5CAVuAMK5~L{+}VdHS}1KPXqr_hVm)lj1T-f_kWn1Z9eVVFV&-S
zrx^U4c;~cQ&G{KT33`mre;;33{)K_;9h@dL=8InL`p<CaKJSgJYk{%1w$03VX6feU
z;^nw*(zkDiT`#Nb+A~kuTDwGg?uigr8OA>iqEjEB98t4-{R^q`&Mncs>LCrj*%k+7
zx#pc&zEQT9A>*h{vCE_*uT}CEPQ3EOz?Q@IsMQ|#!@JgP|1kN;wSPj__q_hkz!!IV
zv(G*kon(~{J7m7vtj}BA_3ae{s853nWA5W*+WKsM>WX8^dpB?2>3A`{Glu^_SMIdw
z%U>RB?QLDVB5$9I*33Vx(Tf_mGF*eaR<L|kC|w@tz~PYK$k2S_!YYwO2j&Yw7ac@X
zFPr>)$9m+N*(Dz<kB^%9XL_z|7u)u|;HSBPtBK3=ln#kHx2203q&hY(TBHH#j4e=N
zyCYgI_v6`IR|b)uuE#tJ+jpGR6=9U0$p7b)|L-S(Pn`cV*i8QS6Ex#=%_V_ZX@V7#
z=9NGP-W$8Wzm?uwc((HR5q{B%#RvB(UH<Bk8}<FrQNv0ZL#B`06(-$n<C-|nh~v>-
zwJwHFtNZ)xIJa${^y--Q%=dHaqD!{jcyqVe@bUVce=>g9KD-;HxplqR>kX!R-)+|W
z@vv0u)a-8I<`XOv8>;p9R>f51_5}a@cr#jMZ_Uw8=emm3Aq;oB_t#zAeO>0_M(;c=
zb3TX{uDbfX4KTjWyw<9VA^X^*#VQ@^dJIC_99QUW`K_&V?Ap0|Om}?(wVr<xP3r>3
z64K2@B%ki^;Xgy;{2TEfx?bJCHUH+l@^5Qq*InIQc6Z{JyXTB&*%{sb5%-_LZR3^;
z%9o#g)R{hU?V`+sJU5<O-aH{6_jT@v;sbKxALH9iz3=9GMem;R)l$jb*kgu6M`3G%
z)y6-s^B=Jv&gc8#erWnUwGZcSnfM0#bE^b9ma56?uw9bukNI|dUk3C3kM7-T{|G+p
z(EsW>>DkLujxqu~dp-+ZsB?M#>p{3`{YT?tR<hH-t=s<LU3RX5K*}^*N0mvc>XKS)
z)!&al^u1P-Cw<6{Yu@9--)8Oa*g8@ArgK4;kY&NnDMEbrB0Y{-eSNgzkK@K4&E^fI
z-E)hhF6Qb?;=Oe1<+pdo94EHid#<c5)5Y+1kLO2j?+@QwbxN;xWnaBgvoSfJ<LC4A
z*i*JUUjH*a8~exqqkH?q{(z764S6~yZK?dltgn17G!<}b$ouWi;;gY<{A2M$`=&3q
z|7M!Zb+~<d-)y@NGG_$cL_MW<^)a3+40tVWFT6)_@%hr#>pCCJ6=S(uBxSD`uT$hH
z!Q8i3`dWkad!ZkfJw6<7SH89`C3E5YjIDi)JKssRg$7jj)lEGSQ{(^9uAeI_PT@y=
zW%8np((jUYN2av;mpd@Z_i4}g_-uLMBirDI)0Qrnq4RK8dbVN7%r41&4yz7S2{0UA
zE_&(JEcK((Yn8t7Iv=r<6bo{ASrB*kP{*kWH?1oq&YLj4oEARqS8tE_wn=NQT^C$4
z(POWq#|cy0B72^a>zC~0GpF&cyp`oCK0|remfDZIW=b^h*QCq0mR<qxT@Q2>VMIGb
z3w>|sbJTt8XzRwdFkXPqO!cFlGe_KkOZ@ljqqc{FNCwcN0{CGkShiZC%gKWlAY^T^
zf6W>RKW>R*`R~QA142Ikz4&znWGCCx{cY3!S<d^<5dWW{MeozK+c^{eu0GFEUaWQ|
zFZkcp^2-_bW%M3p$j4n?<SwB%FG|2}wVeCIvq697)j$5vaP<DyOZR`CjeG7O=fAK*
z<d4CJ@{X-9^X+c0x9wSXok4bm(=^87lcy6VRXr%svTv|b-6AKr<JwK;`|&YWTh5+l
ziQ(P(>&g5$TkR{RwN|#tZu>5)S#-U-`yk1hEokT8>?3y4KMvYS$HxCKKF0W_rFdrU
zJ1;T63R||aIZXUJ&uK7U-lzH^Y}WS%(~7+0u<GjcosB1+s<`TK*lzBTPs(om5IgsK
z-+zYm+k1J{j-JW8_Q84{-!V^7&o3|b9a>xC{$u^(*dJw&9Y35soK-b#N!yLuxe}|i
zKU5yqaa3S@{qsfUEcuQ-tRJFJUwrI#JxM-$meRa5Zv&kL<%$-MSH@{DPy2BENN@1N
zW8L`=|1*eI`2CW|HR>$pdR(C6Uq5xH`h+{zV;|P~Km6YFCug4WrDv1u1(u{#8ch~9
zdaLDkf7gEo%{TszFWp=141XktefYco+qI><^OkIE+0~VzwdTgb)W<AxJKn~~Ue4I3
z_T%A#^AEBMl(tW@T)%vK$8kxnpJm6`Km1wpAZ3E(0>1Xg`~S4si2rAZpH%-MFnrDH
z`cI5k>R(@r|I2cH(x3X|ynn*`N|W>R%M~X!oU^|=r?z1Er*+?MACnUA-ebPTd*S`s
zmAY^5rrLQQ-!es|_NnBHssMK8-_|dtZTwLkc%3Kf`lLIvQ;RGku7+-~HHx0UV7=SN
zXPudk_*q_U3ol%qboXgz$1_V&&oH?;{il`pR`efS-yyb7aMO+r*I71XhEJZpeb1-U
zN@ql!S03MXZmzw|_56?QkDj+*KlFW8S#yo4tk*B)e5nI#ToR96I`ViO=K)E}qYSAv
zwpX6*|2uDwH_Mrb$mn|qCGGEgDsTL`b9Fs~jd{9wTk1!a4NIn9KKIcTw5BWW^vh|w
zb4nkr`%!=ByPU`m_O-X}E$sXB+<*73dB#^I7b$3d=UP3#=IVcj#`;g*Z|z?{%KyvN
zU>V=Bp<kq?@Wb`C_nf(Q8dvWrRpd-7|8q!VeL<f|mua6#NH0TZs00_g18c?gW4ziy
z7j1$U{c*Ugd*)^0HtQvE2O}o<Z{#`UpJ$uBddrXF531+t=Ep}wm))MWeVW<lkY5SR
z^A9UESk$^*v*)P0_$%^qh%?)Ip)DVlbJZMrai@My<Gw8~68uH~UD}>mw<x++P4>c(
zKPzsmVwZovGk>k|A^Yqf(?8bkNy$xVceyNndYchXo`S&YucyT=ULJVw{aCp8!{f&)
zc1G2isu#DaetIWmICsg4u=w?-WYX6wZ)cEus*<UbE7P%Fx@pZCBMG?$T@SbXXW)2w
zpJ$KdBVT9Nc^?l)dzL=AW3@@yt?zIKmu=y>Dz4n`$Lv{dW%^f(t()$@Su<bAe8%$)
z2W9Fk*4%I7exxr_ksW!(Ca~E0R@TwQTenYcm?&_ttKWGy^Uc~9%xCvW)Hq+NkzaQ2
zbmii#@LNmYJbk*mRdmyy#3H|{cj~XB>ILdlU(7T6u;y37hj*){H%{}8eA-zk++=*h
z@}MO9>+pZFKi2WLh0p45wu#<%M)lyVTW=N`?oi;n^VWhzzKG8-R=nq*Vdk`3SvP;U
zy*0HhdV0-5NMhzyBX{*a&le2O)fx;-b=jkSw11FxG&^-Et1NfNVZ~dQIZhw#tM8Vu
z`TG3Y(o1!w_U&(Vx11OIksNsWd83%Vk;VsUX2~133omap-TSz=`q)144}8;CAJBce
zuRH(S3W2uJJH`|H8sw+j{an3mzgP|XnoK3{wL$k(C)!tMmrLGRzH4HW^oJ7*SUzxk
zFk8O*Rn>on)^xRR@6|pDompCL_U32$a@*sw*-aTWz8{u9V%xsv_yX@WnNoB5o@F@d
ztU4Ey+F<Q*j?azP{NWR^sP)Hr-NQdD-F)!ji=>1%D}s!7G8~X?te>I3;DuGV^WvJ&
zofrPZuYMtCJ2ls4rTv9>VoU<-{5X&KN#rVgc|9k9HT!(0Jl{UWn@5*a^l9%FQ03d7
z8M`nhrS^+D^W^JH(;shr{w@CSS#{5i6Zf6AIVqX@d;4*zR7cyNw~F6})ag~MKLB$5
z)3#&Lxy~`=j@i0<Tz1tQwyY^R=4Mnp@pxhW5jo~-ruB_hrrplUuccD53l!{Y)=3*a
zzM{=%8!p25%1+|KS{w6+yJfY`nR-0vbQYDb{rK7NtICt>Q~uOmPQE<d<zwhHg)0kp
z3Cs*`y1{y3{&z3=bKCZae(XQIeV+XG=Gc9))*;(^T&>QnVV){+htK-)@_^#6>%5Q6
zdj3a`U-V_2#D`0#^=9Vk&n~VASoKT%;0uuyp2w;iUpk1_+1)XH-}XnhP(thTiE|6&
zS||E{T_647e7lZ~?!xHIGJz$TLX66*9-R8|^u~9U>(l<!bt+!oqc4)ZRp{kZ_XUiF
zaaX}d1#($+Z4YG+410lo5(3l0SDBjr!JZC`7Luk%ek^}@z161b*}DfxA03=4Q#JUH
zJKS~r<CeWu!Dy<oLXTj)?T_Th=?}K|l-lygZJYmF>YKy2?;g8%NZxvUC86O!5yzhc
zruQH0@BG4$@?n0jp3-q`mZcAa-*`>pf3&suz<t3Q_w2`l5B5tRSoHT_gq~BY>c$Du
z`KJ!_^;m!X_WJgt<Begr&;Ms=>AJDZxS=ol-_P6gFYUSQH7~zQUh1ul>+&dHvF;@;
zY>!-zy`4RSe`VUszxOhjy&vVcAKWjz$NF(c$l5KRxT`O*FSzCT-EeaMckP8guCLoR
z_kq~Ptx4CT_N>kp@be5loqSz^^*=*n*nfs2`Mm!b4uw9*_4aeQTO@pL+t<&*6WFg$
z{m;-CKH&$~x7UyCdusGW-{j=h7$@(Gz0*E5nxk&Hp!^T!0QQBirI;$>AGm(&TadT?
zvV@oWM-Mm90m?2{?3uo-j@ECxw*8{ZqDTKCQ)c-T-Hx5;R?84K@A<V|`knTleD^s2
zXNU``*E;n>_dkQmY}a4s|1&IpvVbr6@s~`QeWH6@xeKP}tyXy5KjE=`VC?jx?|-UB
zz5iR>|DVBaY2kl{E&P9~e<c6gFaJ}wY`$3Hr25ufpWIq!IIVM#oqA>R4+dWb`-ky=
zCi61?+xws4TXJ0KLtU-wfqxg<c-mg^R@@}y&yhGm<xUs>1BrhY>n1<s@BL$6{;*g4
zaA3Ur+l~bZ*N%z*6FTze#_h}VwoZQF|I<QWvi`cH{g;#y#+Pv>P1^IBvoHE|y(sy4
zP9l9mOY;u_-pcuh*G4_|{5yZE_YaxQwXPRi{xcMAJotz6Y*l;r{wLno>)*&1{%2Tp
zYtQ=!|7OZ<{gCw8d&=9sPu?@Q>&uH8=O4CQz^DDN|46R*!8*A~k|nlYul_wtn^e6b
zIgHOr<D}(*e<@e?3)LxQ+_U^}{l=mX-&Tq(<mp!3dSs)X@WdTLd<Ee)RgEvB>v?PP
zvmaRK?Gb<U_g3lAIqwo@UYKz+;<+UAbro|b{^#2t>vw-!#9eEAP<Y+SwRgQ%O7tnc
zXx<;g<nL!y<fp-`eRQAPbvwlxr4QE@ruEt_&6KEIP~0~A>8&H_t5b{ne%gC&PW*V$
z<Z9RXqw}P$l-yp*7`eFrsY>~c-yU*zt{p$3FI2Jn;62$3U%d-?PaO+2t=;fMOtCWl
z-Act{wb$0KKkDE9YunUaufw()oY+~Ny}~o`O-S?n!zK*-`4v9+e9o#m-lBfz`#Pz2
zx^K*6Cb3VedvP+S?8`d;hwuMP@0YIMt^Th!>g*5ST-NVLd+Q@VyxlVGpNybQ=2Weg
zjX@&(D|FZs`=&1a@cp0n@!tJ+C;w+~Tgy;&Z}qC18M9)KEnVTaAu4)drR;8g$5;Li
z)|2C0ANJ~ff3&~rc#r81DSQ5#JAL(z{(I9RzsJ3B;-u<%Ry^V6kL_h&%2|Be6XmvY
zmEG)zwst|64Eo%vmj^c=TB&T)IImfK<<Y9x^+(^gi<wQhb?db!?;Z^yi%X{pL>&$=
z<SgJbfBb9}+t!C`wO*!9_Pv*vm)QN@%bfMV_uD)8zpoDC@4F?tMerbdTWJUA-VPNF
zQHKW%<^@{%T><k2Uew8hb^xE;8>^RZA==xRakzPRCd0!>%f}1V7p$A__2c-1z3#r=
zE4QX>nx2=keMcBy`;G^R`xZ6WoImtWW&7SAZ*y0@*p>RUwQt#4-YZ7Ow}@+b+N30G
zT3lNkCV%+aK9#>qw`xbfII?X*>yBw^FBKvdADSPyW2N#w?ad#i_kHb`srdK)w<=SG
z*IACtCGEV%j~SS%Qsz9qP~3CPy!}7Jljk;|EL{Iv#f0(AicXoViK`ZA99q@GV9RRN
zrNOY8?1g+Ka_Hx#?)=YydS;jd&m@VZ8Y#Up$8KNP)Vy_roBP6p_H&afW*_~f-=e#A
zo$2cRn~xV8v>9*NuD;UZ1;dkBch;O2sHpe-XxD2ub=B?8wP`YaJ?)z{-?*>EKIGJW
z;6lab19p1;toLrmWbKomV>8YCh5h5Z?&^g$j3-!YJ(MkvR=vf!I$AW$|7-8p)uCW&
z0UOq1`=NKUz&esRXhy^><)UckUar2Oee@q|?C}L>AMvxqZ)gwMy1(+8`GZrtLp|eT
za@b!SxWK*z$yCNCv-}U<t#)XuFsbh_pBCV&IdgH>iG+;@&fovSP$!xe-(r62$Orwd
zn%q^{W`Y*=8-Lb$S<3B5{<HaoYt>W{#$y$?AAa56^=8Y)^lG)qwM+K>=AI#}q&Hvh
zPM=-q3pc|bVY1H;{jEMyRda2g?#I2?7V}R#B$2SyLpjp$)9%*)43W*}_$+0!FTYs8
zZxP?xU;98?pYOHRlg)QuUg|XY{ky`&KTCw6i@~O`@44L{=Fq%4=^v*fHe1DQ|0q=5
zt9tBaQrjVmCsS>?o9ESj{&9W9MF*K>+dq7FzP3j`_u;la@9Jz*|Ba`G>prmsHXQhF
zTRd;)*JnFt?vwhlKX}W(lh^8$Vq;#MF*_+z<|(sj`S&QF$>(dHzl>Y#$`Eqd<hHN8
z_qNM9NrF+g-IP1oQ{~V6zPbys5``b__}^WqN7#vfd>p-P`v>g|j+(pPVY4(IZ1;<4
zofxsr)@Whv$@LB&uFcn*{4iGDceY&a(kTXZ_Io-+{xfJXT!EccL;AuPjRj1r<oDEV
z&0A$~c;b6<PdE*4N%LQ@8osC<cEssMi=XV*R)eRHSY@qUE|s*++;Z0F)X8NEr*hX`
zGgdS1z3p($sOs0Oy{;k*VR`Ex{*{mKOuD+|boABu-OrCmtiSM8zBMz@L1LDB^B>Cv
z=byi<(w(~IuFaK>eC7*=Pr~ks1h8AL_`!W>*4E20_kPqJioEl5$tS1H@<DD!bz7G&
z&6pL|o|kZG%j;;3+0!DFICuP-Q~D?CuLxt@w?pTHwq<9(_Rw397p3_ays|s?T=&w9
zdvaHUy)Wy$3G>e6?v4;sIHGv0tZ~+zWxG^1ZYk2*WuSHHutM_{K?gb8586H(uI`h(
zck7i|+4r)yD^K_oACoP9di)%J)fZp=gK`oV^NwDdyD;MR9lo&84zc)!w)R|CE^k?D
z?XlhLS8D6pDLhBdlwDYp713ba%8<@b{5I?bljVc9SNFWDJ0@kQ22FBTY6}lCaAa@d
zcpDgzdCe+z#n#Jj-oDknG6}k-qk&<yNdS}4>Xl)PQ+fp497WHZ{_y<Z=9t;-AFj<x
zQCAmW;Dqc!@ONc!&CO+<yDR$EK^XxzgNa8PK`ToeJxjCmYjeI`FZ&qdsBrkG7F(EH
ziyW`7D?_nCjrE7ptrvE5AFe8xBGZ(>!u*C`^il%@0|UbgR$URsxvO7Dx$WgTx;k^)
z_7(ff&vzKcFSO}DwCbfC|A%Ldmh-(X=B!P7`D}@Eg<NC5LfBmqhMt8}Rx^b32vkWu
zOaD>);P&ihn|;!k_Z%uJJ0v3fMFzCpB5dR3H(PdxZr!k@>Dt}8Hvw*f?hmxKt$+Ad
zI=(mg>Xy^qSL1i3gKT<b&vj{0gMrC@mWp`So#Iwyt;wv1=B!!Gp|{_uQoc3+qANo}
z);x|6`?i+kZI$IZR^9e=X+}fZuZbuBWV?G`+;Udv_M2OA^K;WVEj3vj8SR$K`d?cg
z=peF{SKjC95$B-GYxP}xcNrElo!s!N_rQazw;&mqRn|3nRhQ9$>Z_0=PmrewR#eUM
z3X@2(eW}5)YF6L$<*&XiyB@S@`|gHM$@2_e&ySJ&n%Bkf`0~T}#!K}U6-5`e6}R09
z|Myc}erajgwrSVQPUfcPi|*k-TE))r*e3C#{^5T@ecx_9`e3%2$NKSR!Hs%HQY()c
zzA8SN{72-&om=iLsaMxeSo?3I{;l|egMyzo+sACKKhRn>{hsKDf6G_uEv{d5<IX(|
z2454dv&a2Bo{O&U;o)NFdUoyJw_D!XE0pGOhUZN5YMgp}A$ybNv@o-(+(l6lQ3tbg
z<;t8{5(SHj3Kln>RR6eIB!E@d+5NI<qQI3cue~n{=k>%K*py?pxWK}+aB8gArnVph
zM@g2Tiw=@ub&?lMULUMXUifm`c4v!u+#VN?RsA`TK5HXe|FWo8!dYU!N|%^#?(r^`
z;o9V3VQ<Vh`9kTJ)ms9X)!MW>Qra>uc`OO)>szo|Q$nHnid55K-wl`6?mN3YGCK11
zlkHRUg^umC^%G&7wbhj&^|hSz#TtjMUHhhIR&Q=Kc9ER^qN@MUqKkjjAO2_HKXED3
z%69IOH3lht3Nrn^cMIP}n*=bY^s4qQo+x^;k!Ot&#}}oYGwq~*7)3W5|H$^c=jV6B
zA^!K1!e8Nmt_+UpsoBeKEnPcxi`lgo1{0kcMe7<018%?gmAdrL_iwjKmzYm}er`t>
zYoy9^kSETsja}4WF#DjLNO^iaPsW-vMV=cvCe^>6<^RL+kLrI0r){188Jd3V|1s;{
zq6US{o3>tlD|G49E$QrxgPc5uEw0aPG#(WBCfxmF`=Ryg#hp7oJezab@U%gT-s4ve
z53YNI-IKXI3f(=&7r&gCRg_iu@>=E!uiYuTB1F6%_i;Z~DX{(~S_YXTfuGEiB+(PV
z`~rGLDRs_Q?b2AlwBtX6c#ZSr8u2xmW>rhKp1K`Bjkiktd%)vm{>Aeal*n99jGU+N
z;os~>wNka(q1%O{&REJy@}~vg=#OH!*cI`tQ?>J&$6aUTl_wIa1gg7AE7u>c=daj(
z@YdG<4DIWm_I*DbuzqH8+57e&|2y12pU5%3s9f&_AFxKbDs8owq2l+|rZOvcq?C#<
z#(`W%7S;^)mf>QU#rSFRtE?zj=@7<S(;B!Q)YN`B$1i$bY~!-nZPOoF2k?k*6f#d<
zP*eOxdEz^Zb-%h8KJNDKs#E$9^K#iAw+qj?_>Nr7xnSh|&f%x$`-AL%XMLH=e)ON*
zM;pg4xpCLtJXSNU@mPG9Pohp~>pI;BySG<Y&q$sn$tGthVzl9l!iVg0U+*{G<G=dU
zT&%!<No;z-q+`CT=YQvO_|MSrsKHjP>*K)(`CWBV{~0FMAG`P}?Byk1SHDNLY!Ci3
z{1RCDpP}XRqh;;Kwm)Fq=lbebv5?2tpFZ<X1h8)mjB^%$%e*-HgwnM=_G?~lns-)c
zgZ#|kUtxFWAN$Y1aQA`Prf+ZR+p06!Uy1ZzYp~_(dir*sN`>Bgx%e)Vh4X*DF538?
z;e|`s96PlOdi7VbXXWgF7kljfi3@CNMdPf0Th?E_d*DCAyA!-V>-y*RtnGK6#ZW5J
zbE;YAp!-UT6)AhSbfkH&2>$z$VFBlhH7EDDF1%g8{#wb4jembHiGP-D_WY52bFSm(
zn!qI=Z|Yq<|76Jpc5l}L@%d7F7&G|)_`96_dn5O0&OhtIU)MzbGi;G>m-AQmx>VZh
zly_>vs^<R;{lefSQgOEb8Mu`nxotnz_~D&*sePHR;|%8apDUBUF8R;U5)C>g+Wv|D
zqt(7bH-8;Xvai_R#Zdq87sHpo$w#cr+u#2A8y~!NlTS+JC05-pvi=X_(pT1`?!FG{
zELASN^XhtgbGb=sV*lSYGKKGsx?fwL@t=Xo#xS$++upS5yLoTsY`%NSN%-}aqI#k1
zOFAa6b+s<$mi)HR)xG@gC`+P99aDqN=Y#e<dOBO@|M1$ASpRqJBlVw?)eUdg?|$W;
z!@!<+Z?6$_9}}o;M^?LT;Y`ar_ENL@wlMm2&3tGZ{%J+mhF81{sv<k=+Ir-j9XMU}
z_!ll_zi@#m(7X9X#=$Qp3{!$0{E{-Lr9hD%$MHq8rYhY&U6jF?<hy$El`B&M!J}rN
z8#y=*7fpN}>Y?Rfnm6l(O!cn|tWlb8h4pj(YVNRWYY^&+k#~0EU$}r}(RB6;7n@&X
zFlsD4_{AjQmlT7SW<f2l#6TztSX0Fuq@upME`-5p&O7^7e+LN(%o{OSgw?Iv9|wjW
zGBU_wZ}}R3szGVdl+}C-2Fw>vJ7;fW-^*YcvN<7igMoqOibgi}=YEJ%6aChaFI=D-
z>yS>seZh=&1a67w`5(f_H!JMo0#Ct%T0z?9ZTvrK)wBP2b=~jUm(A?$>mGB9EnBkv
zz4ena=7qA=Umoq{+ic_b@UQuiUnal8R_Cd0ol#_EBzI={M!7AFi$yk`74jBdGjUg&
zd+>w<UlhKJyn0k8Q{f%;A+`0}^c7hz>zAwC|EL$cyrl4}rTQ|@-+LOH9G~sZg4+*r
z0}S^*;yv-;ikrIZ<}a&F0$Ag9|1<2$_;mmGvbBK@qBYz9y<B{{{vD|K5DF6P2T5mL
z6=4hqiP~)c_i}Z}z-hWBW?QjxDifdP6^wglm{q$h7aN|II?=vr#|it^q__(8L)(9v
z$H`n;n*7J@qIq4#+5A8U8Qb#*-!t4zKl9-U_xztvbwK++bhM4~8}2brx7N{H_@ANZ
z?fHK#zE!azj88t*owwtv->tV={`Yq7zX4Zte_Q^r+!g;i`^!B2*RoY#MHruG|F*2K
z_48Y}cKgP8`v1&>j{ml)(6#$=Yx94Gy#@0xvR0iHVSE<whvkDxeBHTE`QP8osMiUb
zy8YmKhNua_hcE8@I;}GPi;in~NdUWT<^z3>8xwN(m@Zk&*OMzaqfRAlyTZ1nYxa8g
zBp&*|JGRjMiB=SN&KZ4RodH|<_krc_WS@^_J$JlQS8aRvaw_+KhAn3qszK$<h{Tou
z(Qo&9HeW99+U<w2PkajU@)SF3PZa!TILXUUT8BO=wrP)X#x*&Si*N2EF4)+lWP6q2
z-_+IcQX3qam2QhaGRJ;!KcczjPrOX-!@X|)U$}25AG+OmOnT>kh739Pz~iE?zRrBa
z&;N3t`o-m!wni74t~osM;)HLhwjEy=@JcN0%a{}~H>LNAk=2|w_J!;_mWNwD=;!*+
za7gmujxC8RW8&8o8LLb#-01l*q>TBpt>@RHB_*&$7D%+giRQfwc3tUn=Ivi9S5)<H
z3d1kWc_6y-<m+Dn%rBOK=)mXd>t8zXFPaIW-9btRM$5vhyQE5Sj_rX?KOlrfA4hqE
z?g{$soHeUvhqu1aAw5O)hSaC~Z4U+hXJC@s`ey6%c=0`o#gFv)4zJ2oY}>xr^V{lR
ziJvog!~)+RNPM^By@Oo0Yf(MFO>Xw%`P_%=B}4XhaJ*NLO`a_C@$cm;b+L}?HbL%_
zB4W)5`WliU&HuQ*tN%X(XZjI;t_nFGE1mnf`<N%Ju$Fh;VE^}v_T`%3N74M86;-bz
zuEYx*+ONg@+3WecMZYKivt_OP&v5WB+rMv*gFgP-GykK}wa(vG6aTH_=}EpIqapuh
z#aGbrhe!=LP$O<pmj<JR&=NK+k5^V*AFK66UjJv{u8I0(-}CO-l@ea3v`Lr43MWeo
zPm;b+eCpdPi}jhLjWn;w*kwDv{Y$j3y1$!ubi|Ruwxr{7OXmGNxq8KGCGEv*Zjxca
z6WmuUV26$51{rc0WoB{C))BwdAoWSy?%tGBkbw@AJ{2(?sAYX|W!s+zKrYd{J)x>k
z^9xdrg^vpoE5`VEp5ceGtq=AKc|M-c?7GJ;I_T0Bp-oLiXI5D?UuisH)Ayk4ALozu
zg+HDh@l3Sn`WkK=>2<(c^8N9T`aAv<RIJ;y=k?M53>+o(0{8wiWM>wiUX-1<y?eJG
zv-xTb<@p7UU(YgB>54w~TwRm!A>z|kUbXv2%{&jxyYcT!|AD;oSNX3km6u<W^Dj32
z(5$CDi&t@6-T0rOHuql$BjhZu!|oOK{~C8a|NEo;KSSTe{|vmcpiURt>-paw$^Wx<
zkUQuKI@Qahy!k)F3+Z+B_dDu;yp{iO@gDzwh6Q;$|1;Dn{%3f+Wz@9Ma2rh_qj_Pp
z3>hsgN9%>r`gBmW94^#n*vdVBdg+h(x;U@8SCt;d{~2mF*K09+<2v{;yr;yU?d96)
zHE}u)%})>QIj#96^!?Wt))V83?&<v4_~`xJj5zWB!{5&TXV4FS{ygxNdE+m|wf4M!
zBsVt)N?q^X%30&OdXD)oM>P?9=hMmCD*iL1H5i!G@<;tRy8hrJd*R!Ar?;++F72D+
z$yQ(diofjrg~GGdRZ7RYK!@`)H8!v>Wng7sV31+Z1n-2#7%*7WrNOjp<?_R;GsB}w
z4|{F6bL66B=P3i0ga>_Xd(vI5ea&yK`cfzP(PUF>McmW#-|7}O)$~=#JHFbxt55q*
zR^QsinY-S0Zn~PYaQ8Ny^-s_2lx{p>!dPUf47xTdy!VRHE@&S%)Ydt0!$p&u`ugt_
zw`^*3d#R!r>h55_Kr{<>#Z)Oz-*Uyq7mn{V7_S7nGJw}}P~{r93M*6Z0Pr<%u%WL}
z8ZzTWjc_>gk*#rg#eCNtmr5Zc85dT*IaMwAI?nsyUjLXMzK^Q&IbJWSbw0fP(wocr
zf!ro3^Te6wEuZ@KN@3h>--^W(7M6r*+@7%E+~p=CTRzS|FBq?c@pt}7Ui8C`+jHyc
z#Z0k2iajm5=Zgw2@GZIaB<Q$_r;gtyw^x#~;0wv_1TaIc3ZzO>LEIQfji4Dd12dVO
z`N-b><-YKX%P*tW?wEBg@x+T0)kgD<R9!#2Yk6;t=ZD?D;}2Yu+9H1K<J@iA6P|g8
z+Dw(}?Yn=dQuM3HMa~D4l+G=@vN9!UjgjF5cZJ{A&&L`3ou?o8bkB!li}DT&9lQ3m
zVZ!`OE5l!pUu7__|H1v3?fM>7y@{2ElYV>Gu$7&*(VevXKD+V@@oRhSK<mZUPqGnQ
z@oGMgo$}3-Yi{0&IQIO__rf!+#liyI`~iH@%D=aM-96!)heGoSr3Pu(?rs<zli$Bj
z>E%DhrjzU5F5NZl?F^Mb0~W2uX_mhle_T(EQ_U2K7u;tZf7XigODz}McCO0syzsAS
zMf{7n7{11HSM*&zI!|WHwKZ2KX*U#03q(o?am_yxcC=#u!PPr!EFbj?{^0v~E_Zq6
z+wOV+7Hz?=u|4e9_RhSk)VpNjR6)nIGz$j4C9@bxMW1}1FLn0ba?|N8fAm*mzveuA
zUhHZ73P#YCr8CWs*V>);p8Eca_HExN&M?c8o3pc973Ln8V8q6t&`>GD__`$2{L0z|
zm+ZH^Tk2h%+TyExqCl{4o^NH#e5rg>&S!SwAHr6yiF7&|_0n0x;tN-A|B>gQ6MnlM
z?cx8Z@BMMj<j6I`5m_6UQa8<ipCBW&d>=o1gZonRBlCs6+~=-1cG>YtLP}S)bIJGC
z0})p$=j0yq{Cht?&_Q;u=o52$snU4WuiX3$uP(oMWzKz)TS4>hJLLrSN7)-M)G1`%
zQ~8kkzN6%H<<o47C7C(<pROp0z0GDk=VO!A^A8Jn!$10U%XcpR@c2IC>Jzgs-9DW&
z^;Wd?#_bE#*XD$unQmt{FTUs6JoWz!E#(1`O4|*4ZJM8Le|NXY;^n!jvh@pX4A<`O
z_+y*B>SUsAv;Tgx%YVPzdRqCQ^jN(V!*%F56s*2v8Z8*;QJ5`Uov|xzZ_2f*wXJ)f
z?yhG!A<cfJO1|){*|l@BR+pr;YY#~XxJ#7jI5jqK+RU?_m+iLd<iCykwlLg68<Dj<
zFIUk~5q_|0-YN;s#T`o|-)q;b{wX|vf$5`ou6@csq7TZ+@9YkY{%NYcyIJSFh+^9X
zgT<<C)$@M{uIqkm`sAMK$MW_)oGH(ZPHm3KeV&;oGXHycbuGvGxa}9dtd7oD`{?pp
zmqG;>HdPCTgt7(vb8Ngn^1ID`v~2QYUe>$`Zi4wi8Hc7bYuz|#c_v-HVSUVx<7;e}
zAKHC$&C*BTPq(HxUzKSpd=n!0<7$8BKf$fn{>i^8jEX!oz3k<;89s##-s&oauXeXT
zjtOr)GGAyP^W6-Nhg*HE1&&F4z2WTog|*J-w-mz{{?=W#>-s-Dca6@t8ujd^-qHli
z@(FAze~s8La6S0Xkg-<j>K0w?NWm4mz1MG=&OFU6EJr>@<ngQ8%>N90by_cenYSx`
zc;A&e-RFwQfrP(b^qe(pm>aw{$RzyYdQzwV<DpG%HrI=dKEK4~8wkZ&iA1Islr`|%
zXyonrRPk~G-}c9SD{a>w?iD(urY-w=`Rx8jAwI44o$Mc$y_A<K@f8ML@wi8OwR?W9
zM6YO|yWjIWU)8T%`_FKcpDX6a@`qEG-%v8TcVkVt;jzWKN-c*qz8^ei{r0H!q5YD3
z3|G9^P@j{<y*#UML3GD*H--%oI!kyGewB(aKASbaSNH4su6c4F<$4##NBB*47kxO<
z@Y%|QB_Ho+8>sxQ3U~h~?6d8feFi`GjeBk?+b3O9Rx)?vRyo`Jrm*Y&<{!N)ejI*e
zI&bPj8PA2$4Zm8SOpq}+=MnSCxMA^5m3hskGavnD_-QH63A#$H{zdQw_RXvTvz>Z(
z95Xzoeawzs>GqG=$IUh}OmHo06etOhOFaI7{b}6AIKvOY|J>!}{xe*ERR7}S3Wk0E
z8742jT>tvSe})&G+BaVx+7r9_=8yPqotL-vM&9FQOh2=byC8X@af4A^p2hmKi=}Jr
zk3{)cEt^{Sty}YZWzn0IyIZ^FNibiLyTVw9Hnz;U;Kfx_XLh%hD_{lGD2+LZKyjEr
z?jVbTPz+{hk-#N?W0uF&{af3nr~X~~pTT~9y1(_Z`}=$ETD-p8wck~QL4f0+B-_r{
zXN~xIUPMj3-&(?JW&Kg?YZG^AhhO-F<q0fvg!_wQc|{nXNZI$-Fh156oqqJ2Rp`3S
zf|@4f(;oN8neFDA@y4%sp7z|2=hiN(_#JSuhV#J;<!p_2Hb0+kYd_s)$MuAPolCOQ
zme0*N?(C`x>%&o35AL|K#hQEO`N%lddv$BCT-~efbg9H5N>|%=%O<zqhwiER-jCs{
zmtxo}lP%&Y#iOMWVB4CQRmbsR-^P->t-Nf9s@p6xqXZ=TcS6o%=Bmn@y8OkrMb~{d
zZr|J>^RvKBu=sW3{d<#g<$ZQsS#)Lj<+ZB&Tg#4?e9-o4o+o<Zgv`A5ovXJ5Fovwz
zQfurp>rpnZ)ul_()9zXyWIA?fo?k@cXF31);P8wETvmO(OJ8$zu6((zeAWDtm;;+w
zf90`@nojMVH#=k6G_eg)x83JXI4I!~GiQ#*gCfsASw}HuNRZa*6Ph3#m4{q3NKQC_
zJLA;eTtSXQM)eREiK0<~oxG7NmihD^-pb$z>#&ecFAyrQy1;UQf6Dx-X%oY<8og0h
zJBT3GtbtcQjPfZCA3+<-r}bJLi}_Z8R$B#w7x-n}6=57GR%CM8+y1pa6I;I9ZTW?L
U)e%}}iQ;;9)I5s9nfd=s0H^3zfdBvi

literal 0
HcmV?d00001

diff --git a/docs/_static/getting_started/rag.jpg b/docs/_static/getting_started/rag.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b68eca2564f8535b33d9ed6d4ce4d4410d2a0698
GIT binary patch
literal 36897
zcmex=<NpH&0WUXCHwH!~1_nk3Mh1rew;7xnIM~?O*;qN)+1WWcIk<R4czL+Fc_f8|
z`9)-<<mF_gWMmXn^wbrUbd+UeG|V-13=B<7Oyt!qZ7qy!^o&i6K!z}Ka&q!;^GNXW
zN*F21C>oIr{vTiv<Y3Zek!5C7Vqg+vWEN!ne}qAvfq{{g5eyihfRTxrg_Vt+gOiK<
z{}G0*0t`%y%*;$I%&e>|EDQ{cwTw*63@n1ILW+itY{G$w>`H|qMvW5}awt1(JSZA;
z@q>zSQc)8pmzcPOq?D?fx`w8fiK&^ng{76Vi>sTvho@I?NN8AiL}XNQN@`kqMrKxV
zNoiSmMP*fUOKV$uM`zch$y26In?7UatVN5LEM2yI#mZHiHgDOwZTpU$yAB;ba`f2o
z6DLnyx_ss8wd*%--g@}x@sp>|p1*kc>f@)+U%r0({^RE_kiQrin8CgR5fG1|`Ad+2
ziIItgg_(sN<S$01at1~wW<eHKMME|r$3XVPLSZGNMh+3Di3>L#<Wx2e`XHKAbdgI;
z#pI#tN08URJ|oU!O=S5D?lFYFZZYsMGcqs<G7B=;GkpK0v8aJ-6c3{?Sa0)V?_=)$
zJU{ZdWmUF+JGWl{X0BQ7(-)HM`xW@h*tascKbG&FCdIb=Z~LN1-*wqb-uyP*=5_2|
zkAuqmZw|jEMr7DoW;dVrEUD#`p4xdeqVfHw6WQy2lz#Yj{J<}xRH59YQr^lkx5ycv
zLj6<OKflV}x`1E)KSN9P<N1I2s^tIvjQ{7p_@ChWzWOhgFY3QPt^X;vx2cQa<9~)9
z?)xnNGhBGS@IS--ss9<CRM|h2`|<oggY&%h{|t5E{~0E}zM(N{812IB4S!$e=lVm5
z3-{T#pNy;j6cb|n<MgBXP1DbRv~OQqT5D)I?bVToQ_9uz9c(Y~SAEOkU`gm{c94h@
z{#e)l*j6FS%J$e7_MD9tM<#6l9#QtsedF3&AO15)y%hSR@v(G$?;mSHU%7XId@GYB
z4)J^sydmfDCGo|L^;uhYwS7A$y*ra-g1^*B>As#dM*L4Qn6idbvGei1#PV<LZ>1lc
z=G<`MaN2dP<qHa*o7G!P|Fk}7z5GZ1KXdt8{xiH0RsYvq>L7Q}RV09sM9i-JQR|eh
z^TXvG52re7MR*&W|Gqf(!M`l6{)4Yq>e~sw5IW_V+U3z`*QveWuSnSOzW)qQ=2cAp
zcQ8Eum*(pEKUDuSsI)u%XV~8VS7TCxEmN1qf?<RA)E}|`RAMLmpJ7AD{9n9Rt63}l
zGj!BHc~PVOpCMtT{TJy~4Bx=joc6gri618)-Y2p@*GOf4$dxN^v#Wg)rGKxvx9@=P
z>#pbi$9H!dZF()odUZ|C$^Q(od*<2oblFESe5=wHKJ-uTVomb0+Wl_3h28j6dV1Fj
zR~dc#=e{jFkLdct@;`&@e}<DbANBt>t=s=4INbh&_did**8dFo^1lLAOXRHoGfb*{
z6#ttww*JMkb@dOX|CuDq{qLUrmqlI-U%*}%F4^Yee+Gg73@0!A@c(x(sQ!g{=o`_;
z{}lf-OkVh5{@<o0{~0ditzf>P-T7nvqm_G<A8_A(9e%UgGCDi`-|KyKiSI-V<sO9R
z###S%cUJzv?LW~`Q*O!ZkGBm^F#cz-+rqf;L-c`Zv*cUfmj2!m7MmQ@f8N-`=;E<E
zt5(_?8F;>*Q?}lJn#S|aN7GCvnX^X9q!`#b%D&b<RD87f!CRpW4nx}(o*iFqXeTw;
zRbAJa{*e8cuYTZVW4_ikvYQ?FPpyjJ6ybMOjj2kP)xNqX|MBm5mY7Vzm>Xw{N;PMx
z7;cc}&@w9i_x)vD_HU03^ZyBUYqY&yKB=h7>cP*1y7Zr+=ldJF|1+>O71iBT^M5SL
zP@P*OR%gF;&yUHq54LWcnU|~hIqJcN8;7&Z7+(KL{<Lo29)IWm43n3AWdB>q|5uck
zp-R8krgBk*xWD(@u<*>(6HT`Sq&z)dH{RfT@-as7$9vV;?sn@R^rjv5o6w*)TV3Hh
z<5b804Et7hUHg-|>}E~vm96;#H)G>(XU^QdxrrtG)7=8*#14s*WxpoIw!S}po-@lV
zIPzXab>zHTC7LS!EUA(H0d-XjUvtf4He55Y_}Xh$x^we1V<oF{<`v(RSp=%2KHsRb
z`cd)2_UrH2(KBN1+O83EoVsM{)WB}OgKi%uWzYD*y>jD*k8u+v=0v#EUCi3dW7Nj8
z-0nxn%loFm-^xF1?fp{Kx4)Vx#xvKT>57Lr$Lx&s1pbdji|vjzSnc>{dE0gA%c^M$
zAHLol?d$aPfl%eamTIpja~f|p{^6~<pwQDiA-na*-iQ4^yzJ!Sj|%1O-YlQJ{lN1g
z<_UJIB5FOB{n7uJVs-w|YhAy02~vl|n%<uLy-e}S-{9;z6UMyw4ztCNeCoKiZ<zJy
z^=TpBf;CJ|X78SqzTE!rm*|b_y?=x@&5pYA$Nll?D@wV3PZw>!`^WHkG2`3s;{O=q
zPCBrP1Y%&B{|tBw;9dOqiUEcyY-SEwF^t0a8y9-!4`F;G+Wgp_=isCN44n60Og3;X
zn7?a6|JNessrhGkey43;v4EfDNA*Y1ee(Gq{QDAT9l5zv?7AzrhLh*P<Eq7zBvKL{
z_k^_{Ip6s*mv8o}3g@H0if2cf`z~KK-Q4o=Bz6bmgU4f@D}=Afwv)ZM$2Cvkn#t+t
z8`t0cyx6Aw@8v6duIFp(gnsayd*CnL_BQh1Io0&3_3x)8*?Td3vS+(@`eXH@H&)Nq
zeY<w;S4^gk7?aQDT+4t3pAMHr_Sv#my{gZMj&8a1$9AnNw<q8Hs+v1xcenKX;#p?=
z%C&6P^27XmS#}ziYaC_xldDsecABN15@waxNU*hdk}&b$x7X`(7wp+K>DDWW=S{A+
zK6)*j+P;vzNrSn|zsHU>SGAPwdSXCuVZ-Xm1OFMy6C}^)``3iesxiNsS9<;ybI8?o
zYg4vnpH`n`ks^`W(eY=`-{8zY+1J+Yll-A|>0{o*4_6yz`aRW^d%(YGp1s%Yey2-|
zt}VHieEIG4powO-7kVmtb}+N<ddyH4!uU8&dG0^)n(TVfJGZ@l@og2KYO--%%`)q9
z{&i1y`yR|GS@-SxqyG$zYrY)c8(YQQqxZB;z-L`G!;Ta7G1b?TtCKTV-15w<&E-A2
zr<q^mglt50^U-318iNn6TQBVFKO82PU@5)$&Vv68yNc|+7*75ueIzgRWA&j~(>AYn
z<PrNk?KexNWbo65q9fng6`22>w0%AI@9h5!Pdu*LzfrmWOIQ2GpDmyM>|LB@_V&2M
zmsM77?MpMv&gU<A`R!Z4?)NE81`XR7PBQOfSisA-m~CJ5N8Y3V85&IOB`U;@NlM8U
zKFVg;D;m2q@M}=;tF2%9c0S_OK3t|*y(Vty(#<E7ge+9@6ed`(U${Qu#b?Kl?vFdJ
zuMIf8FhgL~VMC`yk&LQUpTEzXQLXTD7Q@&1Tp5NH$%l63^@?$9X$+iElK$%LkB#ad
z*WzeW@zo!%|KxAu{huLW_4>aeSI_?u`p+=Qd13wS<Nv03#<JrrC`N2c4NEgeBU?&s
z=K33dyTdlNeY#yxua|m)$4Y*^+T5itrSdvcbiVACuC`7nuPksANWb!U<@eQFaJI79
z?H}*|sXkBjKf{L5>;Jm4p8w;lKdE?O|J%p^8N5{1HRF!4{$ba6&VMxdKf@vOkNyYi
zbZ2*)Ca2rXT`Xxc{|8TB{R6eVlQ!67KJ@mN{1NxsZMU_}!CjJtlSP`}ZTRPYMM#6W
zLcl@ciCjz2^_~jlN2<qL^uiC?*`J+yb%EfmETQ^U)9kvZeL3#-vX77B`Awy1mX9J6
zC#v)w3%%ILW9ulwK-%cjntp*Fi(Nl#ZH~*fYBAf`_-VDw?)k2Pr{$M26xT~^-BIG-
z7PajgQ}l^{$GIk-7Ju;1d&fV~#UE~aOm_NFyfDINX{vXk^2%3hp6q0Q5iR=cm+!iz
zALkznyY+8rrt0z*ljCaVZpCfa;}3p4iS6+P#x3cS<DO#F>qE8<hPF{8{waM}*ZF8S
z|Hju*HQ|9hQ_>zjwp(?0;*apz+Yd~;{3Q9|eyP`1E<WXzS>6eM11G_TZbIY?{xdAv
zysj>|{^KwEhXECb|8<?)`P275!|~V9L7P^`$Nayvt)4$M|IZ-5_@5|ykNt}^ujfy>
z|EF$XO<CuB@SoJj_O3t9k80KbiMj2}{@Y&uE#Lg?%s>Ab=DFVOH_83a5I!g6>$mlb
zUmyP`P=C_-$K3x6iGlwa7UyX&{AU=nKEo2qpFdO|U1xUkV|+}CZOEEEpkcQCi)*+(
zJe~Vt|Fw1NOK<OaGW+;X&hLRIe+jv7t?CtFII*aYBS<3I<B=;@M;FrySqA0>%P+bz
zhy;!dEc*CA1Bd;Sg@641Gi+G#pW)(m(Knc4B8+dy4-k(tE0)e|S|O=2htYxCwK!`2
z5q<WTlH&W6FHgL_O68Ed{0GsKf3+v=|M8#ULAR6Z=2zm+x8Hobt@%Gg?bHSQL5ucO
z)MQ^X<^Rc8V{<J(<jchEj9;w-+h2SCXW*QEjMw>}jSYKx4uADd1NY}j7uYuh#yM_(
zJoov>7iE_J8A=|1$+CYa`r~5h+4H?$P88pgXqTVj)L>iJ^@;ySc)hUte}?}4P9wFd
zpMl%I9993LHD6}C?#JK|mamK{$N%*%;7?ezr}>{Ke^2zZ9~;*;RW)pS{GY-7;(rFN
z*~d;F`V)HfRA0-Qo&OmwUSWLwC42&ZTfD%+{|vz&?LL0|&tNNf{&TVWA4Pj{h5row
z^&MiDl9$gF|FJlLef`Ba=f5*Qo;ts5_rmz(jjoda8BTb6n4a&wCjWYm;o|!XUR^w~
zaQ~N#e<5IF>knP|cx+DnqwEjo15)Lyyu*svpY+&AO_IOGeR$&u#j7Q~>HgDqOk!22
ze|+HI6vo#j;lj)Q#C}j)*(lzTyD<4`-qeN1A3fl|w(GX8+wq|Ga~t<;-_`y5gV#EH
zgAAs#(6Vt*m+ck*84mIP)TkHU|3=IHOYTwd5EqIFLp8}B5S_X1N4w{TbF+T6ZLAhu
zy6NN2^E$O6d=bC7KCQH=f85_uY`eW-%jD&mGqrAQ{lIMh?kn>PX8ENIRmaw^duheG
z{#I$O+pSY_XKfnGs~NA%PmIf5@u&RK(_LnEq8Ceb>puUenbD>AT}JFb!*N%8_aD+r
zXRm)NFZQZ@p5m40>Bb(X<!Y_oS$wznw%+^lmbLcoTg7(e_U@g-yrjTIuwKzY&i~n;
z%#Y!Q$Cv#`yZB4|;N<*&b_f1(`^7g-m)hM~6ZzOTIwa@GvEHrj8}y?47l^(w31DYk
z_DAxA+{`WO+NK-vKi)O<$@xDlbN4STy(T3$d8tOMwfCki%nP<RCjWC<e-U@(?ryus
z+;GP8Hp^t&FjpS_2U%Vk%u}!Illq|_cK+5L)8$WhZ85CWsTVo8&g=67{<c5ww`APv
zTfHQPf5(OMJHtQ!-2d^_*D4L><Nhsnx)s^aF4`orZDsn;a9Y-(=Iz_h@BTAfII8et
z@1uHN1AplsYOih?9*&FnbMc>~s{h~bamPIzQnd~UxN+FNV4m{f`H}UU6)Toh<!$8H
znK$k7zgIE8817epI9odP(2vOv`J4B6$v(L(cI@T;r;m;cn$>zWZuzHddcv&pqxHl6
z?@glrGyL!>iTHK%4pW-zm%urH%_jeQ%TVnrdbmRS(Y)rz>c6F?U;bKpqMCbg`-#_E
z{xgWbsCc2hlfcNZw)qjO$uIUvT`i4x94U9|T=rBq3!Xr3hI%cAON;h6yL^0qOkeP&
zK-Mk)))MP($?ugx?{DtT7PT>X(B~+t^`GI7ioMK#h8I2;{xiJ)^q=9AD?>FasL1^=
z;ZN~@hK0t?{~7k5{?Bkyr2dfe$MgSM&hr2J&!GRGK_viGxY_?_;N(AU|4Z1!{`a5#
ze<r$*8u$1=!_W#l(YXH%NB=WOZ8>Z8?BC}@Rx)MVHriaapTTz}z*~m9XAVPE0Q;7}
znDd9<3&s4{ec)E!?sA@hJF%QLN7}6!BPA_Ps?Xi|`rfWA28~5sWMgggww`wMqxXG#
zTpwurn*H|JdinX59c69FrNSHCkA<_wRyEw8T4CPyPv`YLiEE{IgN|MHom89k_^mj%
zNnU7l@iqZ=7M>GTlW#TH9bU8|W6~?(7iGp-8;{B@RN?q8cxY*$OwUw?Nfj@zZ;dCA
zyO8E(cJ7y{ICuEKKiTOad;dOZd9!^xJFiLm0=ed$w(U=2RV&VIWcqRbTj#b*`tCEA
z|EN0?^OJpQx?;3SZJf=o)&<Om<@rtRWnKyX*!@U(&ocjc=RKb$ck(M2eUr6|TmGZ-
zN%+RYJYgS}7`RMym1=cy<yyt=!0<xz3(ih6z9}nq-pA|Pr0jbhn`hV^T<hM}9?+1u
zop1KX@9p~oZ(Nf<{GUPQ)wfx{e~2GDoVK0gKZEOT^{2c2?tHh9WZ%bOk$8U9^(i&w
z7Y}||y1h%+fAY!~+YL|3ygtmD&Me<7s{EohgZb)<RZ?<`OnaGquN-<SK5b)6LfO5A
z``6+MP3?(4=B~T{Bm3dCwX0titqI@2(Gq0Dc6(QdQ$dA|;cExk(&B$8b1mzQFO~df
z=-qX$H+fa`kHc;e`nivH^rkd2vzGEO9^EN<@T;lne})!2)2yn}v#rm&w^-`tEviWK
z*Wo>JpiNTjrzFdl1$>K<=WD)jefTSUAYNjR<>G@&UfOA0c>5;d?$e{2>+YV3_;f+2
zQsq;&GwMWngKbk6Q4{4am~UvTCot<iY8dL`0@{b!n$>HwAN4o?$$aQ7FZiR|f8W+?
z`q!9cw{U(hKel%AQ$-mM!{%EJw%2x|PXao|piLL7U`Cs;-OIG+KLcvwBC_1;dDOtA
zxi6`{BJi2|_f`h0uKXGE?}=1){(b1cw`ev<OfIRuB7k|tvY(xQAG(7i8D1f2D4H*9
zWBH$f)BW(@aDSJ(1sf~BJ>aW3^0PrcM)W^JWB3_)u0J4o^CP}`XD^p-F{)klxX(=8
z!tTi1_#dLRQ$aRVUO#-F;pjto(Tdf9TB~L6p77SG=V9UGkh?uY|L^7n{O-g!1h>nV
z@GW1=6@@iCw4djO9}W27{B6#a{B^fer@ZD}d-y*?p&ZA)`~?Y1ely3rTyHt~aIf{z
z`CHU`O?rcLzkSX$eAvz_&ARol<{S6_4ABc0@Rx>#yOt!JI;_w<!Bw}SY~~|>fj=_4
z`<*_<1ZUYv>TWt6-0EqtK2`ZbeaR=wV-82F<tDGmy!CFr?7!*Nym!~kFY}nK{GyhB
z<Ff|+Kvxk4(y*)s!+O!b3-?6lXFj}9W60BWefql1ehdCHtnT!4t3A!w|J^tD$MHu`
zw@$tPjlU(~-|deq$4|R9<addERjGYs%g#J!V%H;^zz?q;JN<}!<S+DN+Vi*n)c1bk
z*|hPF0$atWRkhVZ7R-&u1NhTEsQ;OKp11yn*7g4kuDT_n=YI(Q^L*a6|BYz&e}+ZV
zc5z_|;yvAu<U9Uk)oHx=C-`CB+T7(^-!{%K`LI|sAyr*vvEZlU$Jm1%<aWo^f874h
zlW+HbhP?j_zXD~8|1*5(uK~68&VyQe+W#3&e4W4c2lr#~jxUv8=0+cll0Dk<yXM5U
zoV#0_`NA07736pss@favpUkVs{&z5J|1Zt#1?$B(d}u$m*4S^`hRq+#+GW?S@#N8b
z+PqbSVX@kMbq2nz$)m+9zU5Z5FA>oef>Bl_t-8gx_andSkG+e31Rt$>I&UHG*DaGb
z-F9enYMHYBjI#6Gi?s$B%(t?S|I`1^aMJll@_&ZJ;QtJZb6?-k#t?hKd_#Nw$NPUK
zpBMVia6{|$e+E}mP>O&5&+~a_{TtEJ{|t*}HCX;<Xo~;jQKJ95;y=TsPVEcY=TRmK
z);kc<nA4Pa8&s>oxXDAf@#LyENBca_vP>$<)L>ZDMLb@<q-WpbOEX;=qQiKMG_&^j
zZrym}DrVDH`}iN*{|xHRkIw&Qb^p(>;MVmG;r&0V|M|5ao&Sxi|38D{)&=~Ldp2*q
zqN}=P@Aj@ow?c2L1T9ESeBroW<H|q&_>A&TS^pXMzMkCDz4&yb*VBcQZ>!r>8zg>N
zAGfH%CXc^+pW=rlE7L!+AG&UK*xL2)q<6jl8F=2>{m4JGm;3NP>kr2}uhwrWogMi!
z|7XJ4P4T}UZn(cR|13|qz?NXQNiL6h`WCdwwMacM{m;;}kL!BmQeWFGx0}z;PWTsb
z{`Z{qPgohML`5IZ|FCYKaz*-l?usXOZ?C_*^@^hc|GVDX$DeMS3z{B2D$n1O{f{Nq
zN%ghaoF|iP{qFu}I1#|U;bNSB`frDd*rlILqh*7==hQwlVQl*6UjENIHf;V!_XEFX
z$ftkQZJK-`#(qNm#}69J>lUq!$`T8Ix$oSkdk<?9r^eK^t+{}`m22iHq@u69O5xR+
z0@lR%isy%BrMW)u__AY0ssC2R{NGPxF0lO-eKu?Q@p`5kDbBUUHHUxX=Fa}P@8Yj(
zjz3O5(#?PRVR=KT^wRv@g+&qyzurpxXE?rs`Npbci&wq;<+^d_mGx`aJdpo%=~1Zb
zpOfp=|DFAD`BD9kKUcoI=ZiMK*LR=I!M5w2{f{53tLhKrO1^V_-nq5cJ8t(sWs{|%
z`}Rx&nZ^{G{&-n4@9`To=BbsBcpZ0myj`5?RJSD0zG?p2l=(b=ynpnc@7&7r;aS(;
z9NqeB9{YM2|1(_Z|HHO7-u=V3{)fNol|3%&<}5zOKUpH^e(;rlrb``U-nabWT~MKS
z=HTL+S{{dmVorYc*8hF><r@Rr)$EV+|4foC|9AI4!<R)?U${R1XXxGk$)oiC@ACf)
zmpWcBFCe`}(LVmi8I*z_+W%(t{Lir9Za63fxBv5NKUDvXYw~{v$6cT?NfNRSESsS(
z!;3o@Xa7k4r<FV>r~vT)%l4|lwrNlOqyG$lrr5XHe-pL;Wp-~bhDd{L6P?;`T9*AM
ze_WX=@}HrkHiL<rwO@Q~>q4z|SYdYP;_M&3|7j&JR)0gw{x91#kS`D4|5Re9^Pgcu
zNc}I~wG1eOiK6dybhdKrS$*3>FZth-i|z6gMNhqw=d$5mDU|-<?kwT|40gX4{Aakp
zvF=nC!>8{3H$NY|Z1>x9tGBq!`pE?x^4~8_{&ltfN6@alGyXGlx*v%xI>XoHE%=|I
zZ`bwp7aimrY*;@Ao&RUEPf6v*%WpF}|0S;YcjCv>?8`@@WN*BkaYg>G$kx|i0@x?~
zyf3rhKg0A~w#Ra>jDL0Ie>;BM^zlEzy}TR$#3g<H&oFT{XfjISKSN9N<5?g7GjP<k
z-T0?(S^1ygWS9N6f0o<rKXkvyp8xQ3c>kaNxc#mSmF&0oS)TvTV6c|!zWrOx<Nxe3
zeybn1?*G~GBJAdWhEzZMkH^->FKV!9{wK)aQ(ph^=i}WR#s3-XKGwH*d;VwOntsgu
z{wLudb61JJmtj0$|5Jef5@@#TlgQusCYE>XR<F-4KWF)KX#oGTCI7bWO#I+|Ak0GD
zEql8B(ZWLIx`xJI;R^#DB&G6OYit)!D0$28w>9zN!)>nl=Ox!xA&ms=`Oh$bneF*I
zo6?8Y(KB=3*?2EGYU3B=aqlCuGIJBhc@`;#7aHsG|B3N8dxC=B?LWhT8~+)OZP!`<
z=BsvMabh{^72Zn`Dtya7vWqY#fmcXqwQk$FG%l-LDtFt`Ow+GB)Uq?4g-tTxkT+_c
zcv5~YLp&_U;-iONZr1$w;@6d}6y#>E9aa?OW|`@Ic1oFY41GtxMpxf#eQb67TZr4S
zJL%Rl>-X4ZuU*gfcj<qI$JY<Xf4%hhr+eJ&$M@wk=P_UOy?awQ&WmH~maxNK_b<8^
z|1Hp9s+X*I?ealy&h)B1o5ZGWJDShHt21v0OJn1mN$ihYGb)N>;{{(_cs23m_iF}c
zKfB7l(z4c1JG41L?&k9kM=Qe{|H;3q)4kT-XH&<s`DFX%%S&Rk^h0%)FX}t}W7gMr
zq1V^$iGO(eY-_p5rs!J==^hR`jtYg7*z2Pht}fbh>_5Zfy2kwUq#PYnUblbm1T8jg
z?-B3x_kAM7_`Y%8y+zAE^dIvTKl1C*i|JzFv%;4BaTmQHQFg4X`HJ~QW)sF2UqWqz
zLpNSDIr&xA>zQ@7hj_`Poe%h5RWn$cJ}vrFe>rtp@Y0Ixi2YkS9=;Q0YJA@~r_KUY
z=j_yHt#P|xQ_eI0X0fO273Y#|)49Z4<@N}1oG+*~?zzO@5tZ<xqefJB?USfR-;ZZr
z?F(s6`^Wd@%lW(o7uaK6lYXo}@-2@4!_#xySE#JdXAB4u;NF<V#lX^d-?oZ5@Q3};
zX>z6?{#2ctGufScw_EZIhF7Nwo15GhzP{IBzIxjBkME`ze`I=nurt5HR63(&y{79Y
z4~_K)@6LI6JU=S;djFQ&`5j{Sc%!3tZsmOZnP=jvpM||^?i=dP>z+6FXyv!5{~6T7
z*Zya?KIuQhi;fKD+Zt=+#VU@weu$f<|8@3csg1WqJj^|{tF*rV`oLZsSFwK!+g$16
zrtjNIXXn~Ex9mz`5q#ap_=j@=f8wH*nNoY(_AYf?`Et5uak|yJK9~1z+yfjRG+){=
zU;Izv1sly(vPu?trM>|&wtE+_D1J$^{o7#oV*%gmz`fl*m-XI13(r`aSUE?5wQJR+
z!g+tf{G0d0FaL4d>EHFsi95UQpUv2IXVvPbh4Vx^j@xa0eqCO&Bsbgj>Xuq}|9uxr
zIp3&91bsEwBrKi4_QrpQe9YDJH^2Y$obaDv{muGG7uZ(?#+5!g-&4ikR$6xP?b~--
zbA9=)H=T9lxPMpo^&7_HHKEe-eY;queQyrFyj9ld*DYa#Epr(%8(s-avV7m%pL2nI
z<;BQ2sq1U^1U|fNn{`PuY5Q)c7n2zz7(AY{^xfNf`1OAVX%k!Hu*qJRa`d;JKG7*u
z+ga4*9=Ll_;Tw+c3;2B(?fK7eaKE^Tym*$or@@_<yVhIhuDinfq{wOFmEST_U$_{w
z&;K*~&u~)ldi~q`{~5fL*E_7Y|8X0%MnCpH!}j{W8hs5`{7m&HZ9ZK8+qAa+OVD-D
zAQ5W+7qjm{+(P)Ds}IHrT(n92*nF&P*3*SMo20vE2D+@9k-sI`GRRsu-s}45dGa4~
zL8F~IKMuV+eE7T7mC3Ppj$Al!W-0qTzsD{4J|*uR<o5q(;Ie-B{V!w6e})4m_y5@S
zdFo}=AHE+CyI(fR-g)2inv`mFrL?_HU-H4PTK5fX3y-g>;<PibnBsP{PV!Zj!|$^?
zE&S`>xhDUyF8*l#DDh+1-jz|R&t@&SooT4e=CI_gy}|DV8*PNj<q9t~*fo@UJkC@}
zbqVk`mSM2<T<7KA701@~`OVr#+5E;9CyIjq{btU89V31$x5(H3Xq0**+mcyj^UJr@
zx;=fuv71|fQN4#@O+kgq+MMOBb-c@_{+<84qIPoLWy34N=hUy)y?Op+0l$CKBk9!@
z?{uH_Wuye0RLPNykbBU_v0nT~{eK2^?#KDRo~r-r>09u_brnB<jl;D)+-xgm^-W;h
zv0<*Nkj4uSmcmopEgoys`uczP&+yMr{Mh}kr~Lm)$}&{T{#%&7^ybOR`cmGtTaxv4
z&IyKpG(8vb$M-|n;woL;J?<%&H{M$HCcy9uYXke|^;*~8Z+YKgFY#_my<pabn-$s4
zJc}grT(oUgIc+$pJn6_M%UgSrE!Rzt|4@I(H{Na9xoiI!4*#k>;iRy0``+G2>5ca0
zXKWq+)m&g-@hWO=Uw_i~TfKL}wr!jsmpMCYogbV3l}7)K`?g$pvuN+e9bM0E1@5v;
zN;q}+M%Typt<yJEe1F)s{h@ADqRF;|TRKO#Cz-LIwtT;J%7b4TtK%6=EBo8G+VjLc
z)i<f$7O%T&m7%44^Xh*|4Yu~-*SE}N;&^`bt8})+lH88(LM%Kt&aq{kJ^FU-(%Eq_
zcaE}d(t9>5oTJOu(}wHgemVF544m7Kz2~n8sreQaRlDwEb#c4G-_T_F4^}VhRDXy^
zeZ01EQIu;Xj}_~leW_p4!Xi~HC2uiJ@Q8V=zFLamioNI`4)ss<4^=nazie-I|MzQ&
zzuBv|?AdxnS9#aw9bJ#^1@2UBJ-nJfz@2T+=kO2n55;QxOu8Jn{i4prfTpN|&HW3{
zeX2K(x&P*I+54qG+D@uf>O9!h`+)a$L4u!kiq-}8HSUjUxgzy1npP{_DpdMVdCKm5
zljHsKr%x!)_d2ol$NUGgPhZUB@6_=*x~=z2sOPsN2K%`c=i6%3<$LOkD-QebpB4Y6
zqLlNU-rC1S`!iB!Hfbu)sX0^6f24fAkex(@mpwz)^fD>iy}fsPcgMJz?s{t(I7#~V
zfyTZJ=1Zor**+K6ioFlrzkSp8bivN_f>jYs4kzC?)N6gzZ`o&d=}nFMV@=cb0?s1B
zv5H&F!|k>8eim<5U&i(2+q&HkpUq8ve7<>??Ea3ot$zwOGpLlw9DCPM#9^Z^df<=h
z2m7`^&X4RRqqBC!lpEZ*s}!wm-LLC?ibF!#M(gXB1^kN+@+@pTp{#J}l2%K=Dg%ZC
zB3t0&mf`(BQ2Jx*6VMxD4sr+M>_683^O8R-|C?+6U+H{MXA4EZLGB>>p!ezp>rhw9
zUSULCDjU}rSC6t?rizu|G9^)PgDWeltA3Z=7DkK}5~8PT+^)Z`(SKm;9dUWj{;juv
zM%x^{v)^RJa^)AtmV98_S^3DHIkRf-e}?JbR!=EbpSC-^Jyk;9L4$eeqOQ;SJ$6>t
zY6P>FoLnMZd);5_<a77^t=v6Dw!REixguRf3w{S(V05kMKPul<r}Ck{=eF0Se<yNV
zcX@Xv>fE$EZ~1L~+=Z<frf=Dfhi?DR5L(W3immRQyYeeO_lFKrB7qlDt~cENlX-os
zey7;3*y%UzH?A}Fc$=&yv)d(wd+yCwj^`gc$o+K{LG9m#vwJNMXZOMwt0ih|C}B~T
z2J^xFne{)ue7OI&WXHe2YkDg`96vTs^MZa&?wVLH)6d(gb8Yk_lj6!6|LFFGy4w_=
ze4PDl`hSN0c;5Q!-u+KytIsl2{%1JU-x2uZ`Qe3nbHgJyG3s4j_MgF%;fw8GzS>pS
zkG}t@YW1JtZ(;mjiLKA(3x1LQ&(IcUw*BMxw#P?%K0W$$x572;_o@0D^%wVZz1>s!
zh?`%ehWl!1ubJ<+?_wt$>?@_u#H_9os93;W!+&ATmi=wt#4j9bdLMA#^vI2$v-zBQ
zelf7i?X-P8<Huc=uz6M=<d!mN#qPZsHr0)rWs=f@zue}}WA7i*|EU@$|DRzU?|+7c
zTMOl7|1&&E`@sLV?(~0#g$Er3|5VI-9Qu*HWsl0FEoI-oD=s}=mL75CpJe{Iy_3Bz
zt=Zyh#B1%h-MiXDK&5cO0p{y>e$D&(R9^OruJfsjHL**Nq|A1kU2=eDYV4^~Kfdtn
z`OmQTZERZW$Ml1@&h^WFnRRz=&u+$Px85bnOx#|;`1^j~oD1x8Yg{+AANbE8S!4J}
zR+V+fm8IOpBFS9Oi`KNuzt!$PlD;;hCaZCe?W6acKOTuHM91jvJ;}VPc@O{5nuC$%
zzZ!n*|Jd*0t7IwfbWt*8+S4hm2@=L~Re4&g*yA{VRZTxC%luEszCCL1EVY-9yWa{V
zulw;Yfqf^#`o|73>yK5kZP#z9(PVxnwQtcTkAVLSr&SNAzx+J;KLg{6fBf}FnvcdA
ze<-e5nOXg9cFKQ-Wx2=d-%k{O9&`NAS+9JVKh78R^F+HU%AEy-?(*zeR=32^{)5!!
zkKRGE55$Y~cjQiOdiBfX+R^F%s#>*{e^3&3kh;(H_&-C->__Q6HW~jJY?PAIGoDWf
zSDyGzQ29T@zE!f?|NPcR|7TdQ`k&#V!1e48{SKgg#t*kmWnOmkUGUrDs&|to&M(;Q
z{xBvCy>UD9V|&+^k{{nq&vzxwdhB)af@^->#CFz0&6AciexKR@VC&6$e=0ukElxF^
z`gGc2+bt)8JeF*ATEm}Ye$Bo6+aH@BxB0vE-%7=Dhj*Wv=P374_$^n(*HhZnO@){C
zGCh98fB1H->H2e9m)Inhb3E3%G(S<|=jI;)?9={8eRO(RW@oW&LuS0+rlwf8qiOO6
zp}beBCJ5Vm-TOQ5i-+pPQhkwFjrqa5cUSEBX_>Hq>4x2!GsUkPqyIDTf1T_9NWQ)F
zcCWwprpsSu@BOyTjl=7tN{T_!k&V0!At?+-QajeS%IR+ZQCYj{hx*Y?yta>b86CFS
zDLBc2Mfj#iXhN0D{49pA{GIotH+@uZ*c!1e(QVePd*0od96uLN6xnd_?VN<)QV;Ef
zUlnESXDqGtlmEJ{?On#_zfT?9Uw2*mG5w>vWAmZ%TWOsu4z7GP{ez9*(%GF(+Dwu+
zrz<2DACGyw%{SnO>C$D}KX?aRn!9c7lj#9s|K2@ewr|T|{LirYU3;#4%exw9EzVNy
z-!ny**F`8zJXU+V&~8sv*!ttPOy@&?6j!eB6sq0qr>P_K_!+~Fu$W&0RaK2K^@rl6
zZdHhy^0WTf<^SgN?Cfb3DKli-6;eIlSlr=vl3wp~<Hx~|%HG#)3=1Eg=k@=)ROf(q
zr3u4~hpimRpG#CG?6sTgA;OquoAuc7@AQP-TZ*RhW<OD0c`){~h>F~CmdE!t{FAN;
z`e5E<qne-aZTo+QTl&-Y%(5=EOP*A5?#%=?hW`w&XTADj&v)|C`kTA1yEiZwn$9*+
zn3Lu*WmnI)lL7Wp%Rj6?eowOfu-HHOk0R4%#jRcT?C#M|9fuqI_?|rwywFrU(Lu)k
zsD4MC*hlY|v$nS{+Pm%gn>^9hBNJAo8BQ{CmYHy{@T=*UWs|E+ZKaO=(YsihFLUF|
zody3HqCUMBukqdF8}57I4`=Su?5MqW4#v#r*|6w8!=9Fu?<Z6k__D7(4w|SmcPAI)
zEdzVT8v)F#33nh=KE(IhXlFl&=a0TqG2N}LV$122NtXJL!rTNk6q;_b$Qj0#&0L!q
zbtPYI>dKq<m!xNJUGrKa_=VTxuFvtDe-u8bEq`#A@84&q&wA^&{H|P9zxABCwcFA@
z<_9U0FYP|hTVe0?p>@x<U!wgtmF6Bkv9I{#j>d+%GK&S=x8`kKnz8Y?+Uk`zZ>Y3q
znO=D<ko<yMaMs!X3@z^u{%81N`l9}aVf%lEu9Y_45B_o={q`aH^)~zLE_QvXpXcQ+
zxpDrKNZYypM^HS!{foH&3@2~*|Fd+E{Zgc|am~bCyDWItELUj0&>ZnS&fvq*)+^Jt
z&r`mn`%cL^r7Qcz#S6a#qLsETXtK_(SkPK_REWj(Tmi!Ynd|ba{-}L$+rBC$E2^*a
zd0dR$&y?RGr+55%7{mC3i{akoKY<V5M!kr7x$E7`$^yqk(XDri?AU`_Rj#$(y}Pw_
z&Ghi;g3-5+n0xA+6BMYLR3UoiNBzU)Ej9Y9c)i^&)t#+a5#af4X8coIH_48_+6%6h
ztk%u<-f_89qUN=A%T1l?u6J@XgE;>1GNk@zI8^@NKf^DnFZO>V4*zF38s2oLru3s)
z>to&|iRD|aerer5Cwfgo{i@H>c_)Qe95e8Hr+sbzL(dwU{|pPO|1+rP+5dQ2p?myL
zywIzE2G?{=Y=t69PUJuD;#?=O#9Qjb!GiOrdl*lgT)?05;ra3RoUeW>T{-g0_27e_
zhtG7|o!ft=F>=?ug{dq5UHT`uYs2oQS9iCpG&{}Gc;(Ob*yEz^hv#o@onx)`QEuIL
z-Yd6n@^*gFyEE|%)0`UfB^A-%`aVpXyXdCs()Hg`CW!=jrfC*&3wf$M;M+Ha@!ix1
z_c{JEJlR(9{NJva{|qmEtIrnBllW1b{IFg;TTXHN-W{`Q1D9kfP0+h?U+r^G@%1i-
zH?I$Fy&dt#{&zseb+H33yQXBTbnRi&+vHYnwBT>$`pR|MN8|Z){`fyyt9>LraM$L2
zmz_9z9KE((*W1ZbHTmZS_E|NF7r(ry)BRB%eEpIBf_q1{edpadMKdhHQ_GQo-T8;q
zyTw10KfH^$=JBPhBtbGVKTV<eLi4^|{~3gSq<ekL>sOn;CYp2Yop(WeQf9J}{z>+~
z?rNX8{=nsea#ZAx=TXf*HqwrBk}tP$|7YmCz;cge!;eb#Bh|0vxnErGUXfY2W72<y
zw>%$;TT>;ACr@(se`xCb@IS+!Ir_c(Ur(9;%gE}>diDx+w-0vHu35S0*&LF}d}_U9
zN?&WgtTh9JeCMhv^@f}+_A*)jG}}Hs@}BD(nbLaT#>~__!}A|5u=gE)=+E!@@qOII
zJ+b+_WF7B4J31?^!}!F(1FRnB9239Bl&*K*`9uH1y;IksSI^Zr%%TzJDfca5)q<2s
z<=5W#{*%k7v)<ZkYBEWCg`>dT0}d0}mn*YW%O4g!%HPOeyzO@A$9-F?mus>_O>#K!
z_OmhnpRxwie**d~dM1DU13t9%)-Jz&(&(V}U61H<8l2KaZ!gzxk9uxu*8lMPTUNfM
z${Z#01#WLT^}IFG;{4;i0sk2eoUQuSy*Trq{H5G4d-ZPSzIhP4QtIE5c}pgrRNGKf
z{eai{5!>wS%vzgkZ*w&#SuSMQaN=NGwH*Hg4d%Wd`q^q*SNxcMyxhZO*4o>yxw}^#
zS+2sLzia;ot}{QT*VjMt@3t~;ln$BicS)@`s+jwYu^3N*f1QX1yR!Pi^|2rBHD^EU
z=ezM@n`@8Q#5X*5Bc7}FSx@r(lRf+6`@_7-$NTe33vIqd_3nDx^^PY!uZWrX3X`WS
z$N4CRmGXz?x7$f?tbV;#?f%JazMt;Io%y7}ypyNw-_#Rz=O($E|4}VEv^%MKMx?mJ
zq4~x#VR!ova;WU++~fIZZhY*v)@Ic;myC`TUXa-=Q_Hbdp0`GI`-k<t&#!;EoAt_C
zGxpBQuGz^yn;AISS@a$^_GvKB{1N=0LAC#p_}^OZ{|t-o?UjC{|7UW&Q2h<7{J#uW
z>v?~$ojx3=b=~xL_36%KSD$36_Bog>)Qj&azVdzT+j7tTxC{ATW-h6Cw5Rbwx=*Sz
zPfA*Y#r($kpFFqh|Mk-TUvJhkJ2nBE*pCe9^S2#8{&&l~z0L>kiEsaq-<G?@JGwKy
zjjc?`iy?VuciFG&!fR`|FMRc_dLOpsYN@SN^iHYB<n5)mclP;NoM3tKSgpa<uPaOM
z#8o4|3v9kth@%eB_weG_z<->2)A1JdEpj>^?#=wL-EH&!k8D#jOXcV9lYF1td)@ei
zTgaTACjQ59y~`@vU9K&eWUC!<`Tae^o!72CRn@Zf2r7(o{;>UFeshichsT~*x8_Q}
z6~FxRUdsNgndZ4OuN16fc))mIz0Zg14SS3q{O){bqHF$U(z!;Jd^=Bbh9dTJ$3+;o
zuE+|L@?E*>%KF?jZx3>%?wp$J;O{t1A<X}edL93NhK1{$|1%uyvHueqbN!F?(u&32
zx#p2ufBVk1D3+gb^2CdxCGyvH+N0NLSJ&R%`gKitSh-+K^eJ<99X(l@swNB5`70`x
zAHL;#aqE?>clFLaU^u2dd&lDfe&;rYt82BrH(fN@U2E>wesc3R--+%!wq5;lQf2;;
z>-Ovan0)B(GIhJMb*}PT^PWbY`I%EKWZ6U;{%SCLT|Oo!v42<Jwev@+T#p`Eo>S;t
zE4_QZ19Qr?Ba>7Dw@k8Hq@Z)=IHSWW&voWrKl~r`3WsdJvgNJt$vxX^KYD&!IXm;~
zVN3RpqBmk3Wv}kp_F3=6Qm4M1%35w5j~WdAGd#5ZQ2&>=O8y^D;(vxmtG#`$ZCRUT
zKH2NaoA-}<-j;2ho61$o8B`dP|3_25ef}5y`X_IX|7Va{H~Da!z=b`ExwiXSlfQW^
zFZiTC=TlyS;rzLM8cg4oug#3oDLlFARaqIQ;;r6Nt0NM>n2w}vu1B?PmA}vrd&iHh
zdjztHZ$%iOv?6*@TM-kHS`iO$v?AI-t%!Z#R>aP|1u_)1B6MM`2!REY-x}>dRn=oG
z_$%CT)yuG`d(#V-Wf}auR?fL8;=AY_Tcag{KcpB=V>G4Kv+nru`jId9F{vYl{+X*^
za@SjIk7W4I5c<sQ?j-fVo`kt~!q?Uxn)IXcKZDcve|~rBKc4y!-QTiL_3EF*HBlK=
zDjQx-xPM+mIL6Rpn(4%*hx4aPFi)K9Ab0ZN`Tl#tSHHQg?EKQ*eDHJoOtIRz^)s3F
zD)Bm<4%zIzntO-d+8cj19aJc_?^rMX;rW|~>3k>mxUb%OxYX>Mb5bq$c9mL&_2Cz5
zR2TdRyK;C@=7h+$1Lc9!Yw9ceBL1m0n7mkox|Zd^K9l;Bs@MKAylwy2-?#jS^M3}F
z^-lj8))m#ixW2*;kK_WrTYJpEP595y&(B|fJ@P-pr0|{LZT~LGe|-4x{jb;dpS&2p
zi~eqY>Fu$<pjI=3`QV=TWe3;PT{Mfzy>Lc6Oqctw6sMc}-V%A{ubOXjO8eRG&xy;p
z?j!2|om-&rjfC)j1}4$zYe46gytx1Mo&6V|smVv}KRW*@y!gjyzuNqD>NoTH9i&@T
ze(`_J{UOEh#!kDU|DWLc_FKJ2^0z&ET6dD&=3Pyb`;OS-AJ&UaK0N<J&yU#;dlxUe
zJ#*>yojV@SeP6Jg{SNQy{|t=lo*$3@7+sTip+;%GZQm@r>owNtZ3_Px4mkDy&J6vh
zRev(|gZ$r`=>H5a9TxECqED8tZeAL^x_K$a0Ga6He{!HhRz9ZxZCd=F;X-cq4UCZ?
z&?Yih5ypw@D&k!}Y8}{hZZ_+ShRFg`gg4Adm!I%wbzGMQ)3$}nZ)I&0j<|pOcwx5H
z+ew;ip{=P=KlBezt1`XTpTFex@#V1smv8gDDL$@}exuH5`-%lzyUJ##FUkCHBlFwK
z{7jD9GM*RRijO}JEzReB@$1dE;#ae3%!6)jtS*~SEG@*T{O@P{sjV6dSW<bGMlf+4
zWZzp@zMs>EeMRi-nSU&ERMgc^PV$&;|86nkukdVFhJZ^ZzWNTE#HxH-#L6B#Ptgz&
zepTzgbM=#ygfEM_7>-}~qq(pmb9t_`_x4TMz56md3wKK%|7^1BdFvN@VIA(ZnG@nV
zxFz;noICSa;nxEoKQg$6x^Q%fh9odpFl|=;aIasyL-qRN%MWA2LT{>Gkhj)jdi*KC
zp6kk@2JQHl->TM5nO4=h#ysp2k4>;bY5sxh%HoIk84j1|g&!_^dhncFq@?}(rla*<
zZ(l56a|W-wcbkZ`?p|B^h#cRC?_E>bwlBGT&xl8*hig~A!z*XTx3lgnYOu_ee|Wt4
zi|CBZTcTUsGH$7t>ncx4Y?GcdiIJV1ZR?k{!u}t=&AJw4+GBQs>D-3pToM0j3$(qu
z7(TAGKQ@o!<KMGa+xx!6+IwDI^VCqM{L+KBvJO*P51y-w`6W2-roEu)e3ln={5wAE
zZ%WuOKj`XA#l+=F!P^V$jeDNVNzn6rwl_QOgL#{cum0RM8QZo8C(9KEO%i+&>7o94
zSw<YghvjYSSkFyfc<c5ZzR3cy*I%6Rly7D5e$Xox&-X*!{ZjgYdADwc1@Ji(pXNU^
zt3Ccl{X>7@JGF}=F6yq+<2X1YulM$=6OQ)1C0hd6_}l-8KHSeWEmz!Q`<98vqyo4t
z<!0ZQc6?iQRhI?>`Xt$2(7I@pl@SatY&icjsC+&g|BY+we};?Rv8=`aSpG9is(iTq
zH*3^?h8IiMH6a=?uww>@Ys_dYTC{U#)46-z-76nAaU6R-DV%GSt)~qa!?dnPX+L&9
z(5^hA|HwP(!DGWEefz&(S}DJ_<d)mcrP<%+XI(1g&c5MzFhhlR%P*M{yJHO|r0iWG
zZdufN`5*iLGpO=E?El;9|F3sDXk;E$WC4Ey+93PY3*l&IAuv2gTjRLi9`*FFy-b+l
z2Mb3S4H|FXs_SZhS3ZiN3B2qBv6_WX{n698*A{-*AF*}Qwpq^}{+T<`srBwglavR1
z{tWXE?Rk92Uce^uVLW%li|!AfLwAa;I{jQ^s(EU6$)`T6U)T3tbda<B&%oV$bpIEh
zt^R+y|1%t0)qm`?ZSs+QQXkgdUDjLm`9#0Tw+{zCnXpdeyLRBj!4psTroJ-?VDIDa
z{-?{p|0Aqwx%}7b{~3Bi_v$~|-j%;K+uHJ<?5nT~=dD~mZ=c<9bq3RMLp}z}*SGd2
zbuoN=8}E5#$=}6a=DM2v-nP+$A$w|Tk5|n~cHJI6kN5X><#+Fsn0(|O<F3A2V$*(|
zZN0i{(bF00?**`{Ow~PJ-1Y9o0)FKm+7I7`nP02%e<Y!od{zJHr|EA4r%X;bnB2s8
z&_ezLSNEg1$t&ajM1AnRyKa8D?H1t!X|psUCmE=-yh|{!cs}c!6y{=4%8PjToDOJB
zdN}vv`CYT@8%m|4b-m90s?<~Z9CBMrfX(B1F<-^HS9_!%bFe>_Z#*c;=3)P3QG;py
z!zDJ_{}~+CrvLkU^gqMVYyTPA>P&w)ANkc|Ra0~~;;zp@CGQi5+LJG^1szu3Ryga6
zypsISr}BS4X&<W3n5*rzeaprCT-`_Q5!0SdkO<K}HRGJ0rA6)zyDyCILqesir)|2B
zxbv!N#4PF5>XkANG`Xj%{3=~<wByIe<>$+FqS)KM)+TB6NmV4@T7HK|xPOHRXy5Pr
zAKH)ZbwAb({L+|Px;(3kQ|!7z&5u_ieUI01uzy_}FJ0qw;SXn~$gIVuRge7ql$#K6
z$<At?2SfGbFJ~D_OTzo>5B+Ysa$IlaL*A@YC(nsJoUZWKNJReJUVpc1I;MY>?uT5~
z6RWmvy(`n)pTzOT@r3fN27S<agY`XD>g`+GF0NVg?(5t@K9#1EMas5Uwy)p7F8eXy
zNBU#8?zOK2U)-A8`mA%sse{RVA8OVdU-Z0*^X&xV*A8;i(Pweu&=$;Bu~z<PIClT1
ziq-qSyZ<x1bSYuHP=U5Q|M?&Ek`G($uL!FB;YX~k2G#zkD=q`rH-w+x)ieD+gS4G$
z%!`R}-uVw>75ir-*}pa@d-p}>i0sZ63+AL-ey<IRRJfF%+HIHQ@MgzXE|2xne`h`F
zsi=;bk$kp#OPK8kVdbxB3;5pMerVp9o6EZCN6{r$X4a>7-qn5=p7ohuqQW@fNBE;!
z>m&EryFPhaF8TCMC%9xm`HiYcJ&dm>ah$74eqp7-q#fP5YfIZh(W$Af1u_W@Zk4jV
z#ZvQ+oM(OUJvX!Zk+jKNK3Qco-{lwHu(RK|Zus%`(eQb4A4<fmO4f_sTdCQ8#<?xx
z!0E?#p1NO{weul=^E~kjUnCT-d|93A9$0a3=d7pYf1VVH$T7&WSuWta?sol4zrd~8
zbL9iK?QM}Uc6zvNzLQn+*QHlY|Ejx8yJqVj&~)|P{nL%9Ig-+S?9c42KCG9@c=;j!
z_%8cS8=I77&wa(RR~ICOFt~kvzL8zo@?iXewf?tMm!EH$b@8M9tyDvUH}`kns<#PX
zU;5f!utxde?`xZ`e0?2}7vA2HdXs-KD?9t^Xeqbt%d)>IefQko^y$XjeJOep%%J@v
z>w;Z=cs`Uh_VTVit9CzVcj?))cW?ci<n{c$ng8AAmw$eaQ9ry}<wO7R^+FZNe&tfh
z(Usl8hSq+{rN7?tou2RPAo@?E;=cFpkJp=*UfTNN-nCor&6jbW2#Ghdd4D{I?e+V(
z>x(OzJwEd8nJ#6wkoAq>zT*|q1!2bu>Y3w&KL#JXCwIxF^1*3uUOfqgD|+AVDos1G
z;IzbZ^^5(_<N6;RyKE==$MWiW8_Pt|DeD|MgH<ZmxA2~{Ww&o-$bY@h=EL^3b#`Z0
zzb@N$UE*ZB>K*>l0FOUe&srbV@*lZia!YS{Zq>@~NBlpl|2|bI-?3iqNBP1(7Tt$0
zWX;pKW}@pHvtO?E__x+CKFPZ0F0Hv^zEaupY;wgpf1V2NqfzeNHk_+pzT>$U`)+<}
z#iTaB<xhDEmN4-rluc}Qko&0LwMD}8>P5Ayxye`VG;CY<qsZgL0lP(8mrPXZT{7{i
zAY)pZ1q0ubS)cv8<yihRv>bMBj=5O0>uFQx+LZpbnRed})K;(GFZ=rZ`+X{}=L_wz
zUGhWUbNeLSwBLqVlasSr;+FSyxu3Z=zkJRG_HBnB&*#3AIo)cC)zjHGqc*L%-2PKp
zr(nVS;5g?8>t$Y=?U%3#uU=8nWO8X4|F`gI#ZS%Oi}db%ye#`_$@=zNY_lJ%*rTgD
zD>j11<dGlGCnnCa<28+64#=`rd~dQ5e|XmWv3hq&^;s>+Ex%3cHy=2<?RcjMPw~8#
z3+xxRT^G0)b+|ZR=`_m;l|ENXo+T-QZ(O|tZrwVZn$Eg(OOk~`Z*niYYp4YCCs9+B
zvOyI6=tUgmLhql9oo(lj+w;C#m+Fzw9+asv@tmlLcb=#E28QrW;XO4uo3~%7$^7u_
z*gHert>+eeI4!A?=Qin%!HMS+d>P6v{|Wt|yVLrQZ%W~R2Ah-he_5oS{fYmWf8hE*
z-s>j6V=Gg4?YjI<p;hcw{>)oup^`TnC%LP4y<G4k@<DciSw-4`$N#Dn>;H1h+BJ{i
zLrF68p<SB)8LEyM|GCy+P=`3d$^B$pW5BIj(Gj<fXQ$_j?%k7|<;un4BG<sj>Wh2V
zzepfN4%}#&wrKvl{<RE_=;bmo{BTNV;8C`Qte^MK>b8I7NAp9|wr%IlEi`hM30_w$
zJadCY@;uJ-596J}8)~w5H64Gnzj@ue6v5jEpPbg7ICs_NC-ZDu8FK0mt^U#ZpTX7a
z_rHI>{~3;6S6o*Se$<}Fba%ewtFxJpW++Y!T(-MX_L0hmy5xiB-F`OM?h0RDf9Syn
z{=Ym0_4ilW|Csgpeb;{m5tDe4A2~n5553h}S-!$sYO^bM!@?K?W(BdNkP?Ywl?(WN
zue447$o+7)b-Z`#vaGO^+UM4+x~QNyL#RUHmgAr7iI?`N{;~R?)|vhAuYSP1zqyU^
zU%ZRnDhp3Kx<Rzl_TZZS4k@{n59i;!?{`UH+wbz0_&1h}5t|KE%7jEk_kRFy80Y&E
z&ysoQdhlf()##MpK^~_jo9vK2&A;ck<@5P@8qAGy%KpN27w*YiO}wP{<-`-$5b?k_
zlS-K!p9#MIUNwLBwO#(*v&8l$KfWt`Tq<qat*vWYGB(}Tnx+wWbCbi#gS=Omn~bW%
z^;cCq_PM^#=7rRc;_SuIE4FuidLnTxG*~TuUUTupAH`i??8R$*_J1hvFIiE_k$b-7
z{X;%|<<6N06`Gs)`1me4NZzVg!1LwD{Rj1eFRx4a?a!`WzVu9*<fTXQhHP&9+LMp3
z4D7nIrQ7|3`61n?&xgw2SEfzyEzULeQ9aUHVBjFb#_;&M$v%~j{)g^ndrb3954@Pa
z$Z#HG_?*H&3N;V@8pgOEwCBH@EzkESJlTG>-1L>x+Af^Dxy@*Ef85Fw@Ah3`Z2qwM
zQF@Dw;KH+i7pJc{yXLOf`Ql;`jh``pR!Quzc>h&x?nnPe>CH9853hEeIvv;_v29}9
zwMP+x1;(E>6P`Slw6$uC<v-Lfm02gWap#Bl?w+c+g^30KqVKE|oZ9Dc_d<~;%Lhk^
zwf`Ap{zP5cayz<OpTo9q^SW4{l_G)abETT9rz)#_VUYX6c)LP4e0J;ywa%3>hMCW|
zIBUy4Dlj<yQDN4LtYy>m*`{WgGIHJfsrjvu;}EBTIjf{iy2a`e)JEz2Ke?dO^49%l
zxV``16nD@XAk?jd3?-m48>M@<^FIUX(n(0hhGlRNP1Ko`uivbC722TX#$kK3=HrLo
zLH@1w!lm=MUiW{iwflPf=DQRNKh94#<{g~cp}tY(i|H5F;yA+(a?OvsRhq-MC970^
z%1g9P?Y3*be4|N&c}W=0!YNHlQUott@2JWB7&2?_v#Af(^Tq6$wPw55=fAU@xMV~_
zRI1*o^c~>2^7@0&x4U)cQy<?Ax_5l}%_F`&RfP)jOUvvyeuO`K&wMKHZCLbP&qlUS
zPnu5g2P`Pq`RDHo=GGstAO4=L&s>r9is|#ZuGcJ^WF|MQV&49!p*A9XeMNcv#*b#L
z*{d@z>r9AI$YGw;d)I#<dvkbx&f2Zgnrq%G^i^kGSTfPWXwC8!N_!U7H*5?0UUc%|
ze+J>KTP{L#_6Y7w?00Ys?q8R|Ec|%wpE}i#?CpCbm+m^hLb~^|R`FA@Lo4j&<f&gs
z_`bScUckNd!IHS{iU)Ot;n@%7w^p$?<;nfJbGCZr4#o9)9XdN-Z{ZWS+cUpv_hWyC
z8x^;5wY+cdp20U)+gqYJ|BT-~KF5DA7VvG_<L~;PVe*RW{~2DNsDH5{`?mJvEgyJW
zmml%IFS+#Y>A1T`pW3kJPh-0h(`WpLd4Cnd=l4BN^pE7XuQ|dhzF<;y-7l@z`%E&0
zU(0iUHQ5&Q>2}U8F6&ih(Vt?Ee^_fT^T+n$pZd&PzGdG9PG_CDa6a#V(Ys^r3Qc_M
zPlGD74~os>y!0+dcm2Be$vdW>P1~Jtq-H@v0-wbKe(N8FAHF}_-~C5)`TcDZCf`@_
z7Ja9$#<Q8RP1X59x^%n5!DT<rADYiqBVM>XDyAat(WhVkd?(zxb?;2d>jn>_K28Px
z_(R)D;~QuB>{_y`zx`{kv%|C?@1?6#*?%t(aQJr4mSL?vXN}^;J*I1Tv)+ArA8~d1
zlpT9=TI?27{hm~KTZGa4$b9MOKXxCc6h<<XT3&wht46+}hrxW#C#RlTKlZ9m*7Mma
z=0EJ4y`;vWty29;`Sh!6?>x^sQDyj!ui*G*0r$1ly4Mfqng@RTVyAm$ecpRL-H+d@
zBh7vqCck@-Y$HE=eO&gsJzKV4(OJHEcW2u6xjaS39xG1i5y)U_Z?|zweALTyIpXq?
z`}qYk5B@%J_v+q*kM_)ec$4|@e}+DhiIX-7*uPyBUil!uecL=H^NBBS&HeIiy6>Lb
zF``<%f1e&;w|MaDK~<Q2mmSv&Ikk^0zwX~ImFC{vBf0OaQfqQgI;$tk<9Qj(moirE
zO6|(5@|`zdre7)Qzy$4`KWAvQp8Wo3E$W)LIPJ%_UZ*|&F4<#PyW*~EcBa^rdnuPT
zgvNAFF!EoiP-pr!^pTzA#V>#Bzx8}LH*;0A?%64~f-5H)Dwy5hvf+i}E5-$Uuj;$&
zn76Cfck24tKcBVxKf?>H?6WoHkNx>8_CHcTym7%RrO$76t@?SR^K#zHgxlYzq+3TX
zUs^Rso@b9>9@}}I%;oZ*FA4l-_$8L@bo$^v@yqLycW(OdZgPI1be?C@=H6KT6$wTf
z%z0>oAvh0V2RA^x1O30VUu#f6AKfK}4;~(vy8@*#0n)c#{=@e_bNRdN--ycpYt9BO
z&3uTw8?3<=eZYok<JS3{74=7qkK1ir5_Rvi)EoDi=AtprnYLvt;Pd|{+TUdVBI0uW
z`x9$*l@Hm;UPv>}j8g5Lb0qm}7b}0i@7==akN}oqN6|Avjy9e;Mw;QhSBy5;I<l`=
zXa7%Bz2V>WZ}We+y{$R(TzY=`-;cZVU&byKVLbObj^)Gtj>Ik9!54Fv^_?xdy{cKF
zw|<wI;PLHVm-k&X+3W8kmHTggesPM3MpMRN4TgjSxt6M{zciS;+8@d{EZ8BuyzOmw
z^Bn8l>;Ha==)bgELZR&zOVd4-E{Q8z?ozFx%o~^&tkhV*V_Sc4pUg+M%WE^muIDZ0
zR@Rf@cR0*0cXLIFJmZh(!@_?TyI;&*yZji-4ZR0dY)Kw}cuPW1)1RwI0OP>+*g*q0
z_;KKsuWR*Po6lc0m5$cfZF=NX!(XGy1>9G{XJ-9pko_aR+_LLj_THeJ2f9nxC4cR^
zP|tHM{EyE1j#>X1<fMMost9hsGe0nm%YD5y`>PD*8!Oh!{by(?e{8n%?0<$XlfO!0
zuVq&?z5cQ4Kf{|Z^@qCW2R%O$XL;?b&R52+{y(+z8f@)8ABj?%<FW6W$?qVmpWi)Z
z{5hK#^C#$o=|Uxc;RrMP=aXmJe-X%LI{iStQP<(~+WdUO{|vV8W~Bf7aXWh3eXbhr
z42J&0VZQx8UrG7@<(S2AX8z;;!#_N^*PPUp|9tLJ{HsMPJbyd<P&D4URnq?Mw%0!=
z#vYO5{jhg~oO|1sj*owDYaIV~I{fh^lM@b~w!C&Z?z=(b-xS7YyuV#5OoY3A6F0uh
zm$&~Zv~#ZdF*7;)zYF)6ug)~PKJoVL8{RBU&rEqZJkQmICHBvCE55e(YeK8xHjV4D
z1`CpwVD7`fm&a$l-pR2%&_U$I%BS7-Mql4)VLMg<bvg`nq<Ct`#seY@FRmB&JD!bP
zwumJr7)u_P-7>Kx|5kH=dA!0klcUi^6+BMI|1%tm2;Uga_|g5qb~)|Kxm#uB{FRSL
zMgP1O)VyUMhqdwT2^{JRzo|7?|I$8RC-=v1+ee3g$FB+hwm!1$W?!|Z+j+zGpoZ^X
z;}+j3j%&qiQ-Ru5gmC5Z<GpUu_O08uZ8G?rzpPDS`cB0o?O&W@w|CgMFRh68njCcX
z-KAT1ty2D+tKYSFrQM-D^ABH>T7LAtWb}{ikz1?dc{11e&CXPhydl4**3CiX%L9#b
zoD2BFKCGRxE5F6WoAb(=Yu5yv9(+sfb^Pm^jpZauY<=CsbxPUO=866I&+u?<d9cvh
z7O7<IU5a`Bx5WEn#AAL-F?`q3?z?QK^+P_C`OQD&&At5}?o4L<&v5*}x>eV{UHheP
zcIlaP$A5<XJdMYnX)sm6hQVR9<|dDcZkIe>bu3n$@qqu>PTk~3{Y_hJ54}Gk*01!R
z;k0#{@wQrnna3Y@F}!}UUT*s1(tBIAqkkA5{55aY&*O8orn=fK(X041b;bF1JN=5|
zbJOFteRw+i>-J609PA&ktADIw_;NS@Xcc?gWBoN*-=@{1s7&I0{-XJreZzX|y+2;&
z))rlh<Nxq&>AJfmJjxo)_Zh^FU(t=f@Q?L}yU&D}EY0FIcPH;$vS-3u-Pc<S=l>AP
zzL>K{=gaNXZ5y{VU1R;d(NV%xzTr`WP5*D>$Lifb_gz`@$1vq@^0sF&)uO7K|1&U0
z?O#^I`r%IS-_@~uub7_R_VnJPq^{#HmX|*}wl}Pc;nXD?!-W|f;+~gu*8Dj3A~7`i
zYu@sYrmIOl4h8f0l;B9wU0tOjjPjHAE$dgT_4RaRaDSq|a`B1y;GjSU!Jpi(GCql4
zxiTeyrOxTsESuJ^rm7;0@)J-LuTwu5XE^_;o$AN5skTcB)H*A_W!#mRtZ`rK$@AsE
zHvYU`8fX8Z`Jb2kk@vs3`u|E7I>_xL)ee}0Is&R@RWR@{c12;1=!2?f&<gynkH-(|
ziXS@=b8^@5@E(5kfb9|m?!JeAH2gW9q}}B&TI2j9{dd1;=GsTRLU!UoTP-JwU+w?T
zAjbat*xpr5Mg|$olRun4GM}qrV_?bMFx8#;KTCJm#q9jTRrmc5yV9;x4{f|Zv>&{u
zmOd@kBr<lpea1#5weu@ua_xhRV>WycKeC_W^TT-YbxSkmmz@ukdHj2E*~6cr3;0tO
z?J52zw!i0(^{0yc2g1F&zrS0<Y1sdlv;O19=vn^9^_gBJy3FdiCM5r0cjNT`405rj
ze_dc-d@*uXe#aKy1m<P0zKPF#%X-N2PvcrEj13AXrQ8a=^|?7opYz@&Ztws7>4ZRC
z+D_Z>N5>n#^mP7l&Nl9Uu=UK7`92GjKbtV#ycp+be!QYoGV1=Jd7@Xpb{?0!xwUS;
zMsoiOQ8_!4itSA${~5S%ZePZ;wEp8W_J2PY@P}O8dRE(Ui+R+xg}JHxY4aR*ev^Dv
z+gjPS{NtkBq_<zzx8D*yx4HWATO+qHk^c<YGBVX01H(+#H%1xk691=uRsWA;O=a4S
ziBtI>PCEWEr#gJ;%Uh{!6VtZM%AF%N?e!%MWsdz57Vzs`v=h4jx@t+)_anJJueL9q
z@kVfep?Hl<g5bXs3{~&eNA;PssdTA?^fH8oN^r3|uqXQAxQ?ys(Y1S@gn7>h`U}3A
zv>>6LIq0H;Ox^Mi>ks_esP_19naA-TYa1K>);VcU-Su~VjX|n$=B&Q8%f0`oX&z?$
zxvoCy>naWA<Np~RhE_cG`Oh%H{>bi6i{BkDSr=0O_>H}n{iZsL%s=kyU+n7QYrN?A
zr@n0eInXxtkN+9op6?TzUw`PA?ybHxn>5t_Gbr;v1)V&5{^*SQ>ovBm{~2D-Y5!%o
z9<oVZjQ=Q4@;}j&=l_;u**`S@vD04cKLh{U3G?RV%zyM~Zyop|u($es;eU9=oi8`O
zIC^#Ce};+DKLh_WaI_zjjsE8pXTE7;&wmEqSv9BsGt?~l&tUb9ON04@@(=FZdWGpn
z_UT;PTFH3w^jnty3<tXBU)yz7{-Jx5iSOF{1<}3={~4xDublJz`tuX=4Wc(MI`DL9
zJm^g7GVL>!2ytv+->@7@bwK{PzPJxa2AyU}(?f+tyI1{iK0NE$<CQPU#QmyHs&McB
z_v@JZ^XQNO_L*7sj5WMfo=w+29qg~po>ch5?$w$946XWWGxvXD_|d*5QsMVT&z;jh
z-g;0s^^HgX`viI3nvBc#k53={mHNl|N1ifc|8vRz3>Q}XyYb_Gf_>$~yW#<kS0}l$
z{AYL)`Y#lG5(9t7e+Hpk`^TRjF8Q3i{^pbX7gEh17soVr-*21scISVF#`(_$plL$<
z?w0tEUmx<8$;aPq{`1lOkBNJ`JMY5RjqQO?jvxPL9|t~%;g3mvi#zYK?8e>wN_zI6
zbmc!>jFZd@cDpWFEw5%*|4Fs#uL$E4`41PYeD;5Q`*3Yp{_m9LpN5P7iMPJl6ZL~R
zcE-HsY@UA}+1Fnz;8*z1(7O8Z>h(W9|A<|wV9WDX;rw%{OZA7sKTiA4(5=ssdF@nv
z*R#NhKTqvH1x@E0Yl^Pyu$tGowD~&M8uxz|U*>;U{ilBAe})suk3ye#)qRRw-B7zD
zMSf{DuL$FlY4<n3Z##3ZZuP3HUsLWMNtvNxKjH4L>yb<U=zegUxaD?5(a9Sn*;9?@
z{QC3cdb}%xkwnt=&ZH)lF4KpBA{z1znqQ!U&4k)Z11V3FGlvJ#L$tCRJ)p8{Vm@B)
z_`{mHJpQHFteS7-d>78-Eh^Laaq7Y9^(<_ERv+AB{xLc3!?CUFUX_{6Dw+JcTs~t*
z<l+gdwmr{L;Ny^-@Kt>)Lv6cjz{S)n>KERmU7aNHl~cieN1v!x$CH#5ER&D>?OpX3
zWd?^9#n_-RGt|~U?DCgr^}Sc#Cv4v4x9QA7w_3|rwX4sS^50@UC>AuO^(*%SKJ(kr
z3G%<+tYxU03Mzm<T)fBspJ74X&i@Q`ivJlNZ>c}*USa>Qao6*|KiWZy`Jtx?^U6N>
z&+v=w_5AOT<o{W_|1l|V{?G72dR_hfj`|;OVP_N5*RbM`{D+f|*0EkSS?YV8SA1sv
z>u1Z)?4K#|OE})3M&-lFkNQVq?e0Z<kB;1a>m8#^u9E@()ouMpu5U*3<8}E5{~5aC
zSU=3~uyJ4IygXaw)pjS3skx8-Ua7bf{x@CT+TQPAJp=y|@NuGTeIM%oGMAqJ{gMAa
zgY4peQr4ioI=t8I-ygC6QwACr0-a8p@t*xZ!v*u0{|x&D{xf{MW&d#T9{K+a4tI<H
zfzEC^o&i0al-{O&V*R_gMlOPR`RgoOAHn*Mr(S%K?}_8Q{>($^YU|R&b9BO2B%S=v
zQ1NGfTL$x<MGZwpRzsi=CZ)_u{IUC}m>u7TyVo|?PV;b6?vOFrB)_Zq^)H@zS9ivl
zehjkz;aR6qVSMn@oSVC)Pj~$G*l8pGgD21afm?&tkMsjk^N;MF@hA52&u7_@b=9(3
zW*Xg+FZ?XI+?T62`Br_Kx!|Rgi8kNNr8jRb`Xb=8^2Z!L|GgPnt29CxG$a`K7?zSW
z5W<qm!;!#Y+gr3x^+*20c+qI9_?HIm*8J06-DN3S`SlLFz4OGkv9ThIkM@aO+uGlH
zPp|o4ocgZpy+R)zdtH%VRNS!Kmd%4D{i#Zs>D$of_a*<>^X>d_KXB@V+U0)n+Z@#_
zWA|6oY485>W$pg<Sy%Qc{-|H{s(54O({0?^@{dYXCH|>;F5sJmZ>VOd$q{$_2W(7&
z1lkh)#9HKQEA6~q*p!2oTpro~jceC`hKs&2{}~SI|5ULO|GTIDrOOn?7lWoE3B=uI
zC)SPFhL6w>tqWk^5FC5WMDLBLd(d%);MLtfw*O~PS${+xv}D|L0pI4g-|9bqXaD<0
z<jL-BQxCZp3U7QNdeHRq`@{LoaZ(@48*@t6x;+l^UU@6vHOq;WpH7@I;1a&T_x0la
zx^s0hW_=}R!$Y?3Wj6HG>3AW%%^{#(*+K5KYsG(t1LwuErRG%aZ#O-8@6mIw0_9n5
zcYYV0I^|y@`e1&1V%$#KBW4?Ht_!>Kco>~mW(}2la)D(^SZ&+h#jhr9iK<(hT&eUa
z=-yKm<qs^^S{X#bQlEExv9)^q=KHO4bF<C=e4h7GaKBP<_=6p7uS93v%XrqgGrYH1
z^OvB5P+)-R+f}i;t78sr=P}wS$xvk59(QbIOvQA^ExXLNTiu(@d%(irWJ%Z0F9$6b
z@Oo*APQ79@;gp*ryF!=bRFTaFq0B0i8(0~1MW^z2e&Oz3n9H_vnRkI7i)XpCU9ACM
zc31<K7RP(t)L9<MJ6oT2|4q_q*i`GDwUocpM7w+cCg-)8m+nm7e$snM`uSxSiXEh;
z1V(=U^{s!Yhw_!uY-#hv3ERavCGDF99i&7;#HMeX{(<w&yM;SjzD$y2Uirt>-+@(h
z+C9|^LGR!6u6?<>c<w&W-3~v$+@5m{vVtD%uyE9iSx1vUsNPq1hM(C8T0toOG5?=u
ze24w(JM}NJW0w}zi~VPKGWAFEzrDx*wQXU1a-VOX$qPIF53bWXYnQ(Yj&ZmoRv~26
zUc8wn_27$I?c$KXOG*}|J@&Y$FZZU$E$^nF$gjCeCfV-z)%Dtr?LWgw)l2pF=l^GL
zw=J9XtUd9OuF`X#E4hznaXwD@c|~YR{~lY8SG)|B?+@B@_%3?!eb?W=clTcXw&{`6
zwNGn0R&iR~*&`I$S9qLhXN5w2Z@hqM;L7;Z?5$feZFb}?nw&r9rh@t7Et9Gm9At~b
z_orL0Zn<pZy7<hKwNq~y#QeQ<aLVI<f3J&vc>i!+>!Wy~AN|fBS<k+a>YHn(+{YPe
zz9KEmX5~4)`@dfVu;)M4Z?0mSyYTGlA8wUf?ne7N7mA%;mZ)}dl98w41`CC+tBq<L
zuWp@x^V9buVtFUmI4rvMD58yP+t;wdzLv#oj~CiqIqUvFz0qW?+4F!sS4=zBu3eOw
zT3jg4wOIXM<hB0h_j}kMyl<Nx)jRd<pL?>qcBf8!pJ*gj#>jC?B-QUu$=3yZ(GTYz
zzTZAi>%-o?XTKi2QaZEQ^h$2UNfrOe$BrsIzVqBRd)bHZ!*|OM)C=Ub%`8g$F!f4$
zNwE+2;<qQPt0%Y_zo>lav-5}M1Amc7$JH<1mM=eEE_I&!miH%t!wPQ;3a>9XX1jX!
zrFVP$KkR=rZQ{e|xka(b9#>9jsT`Tq^(H`oQ<Xz`<3afatomKwcFq0Ia6DG+wEK?i
z?BBC39`vPJ7`7W}n?G3EFY_zBl>dM}%eoo5%i2C=Puh`vxai>Ns>bj2&OfdjKF$rh
z?fQ4Ajdg9st7)&ZW%Aw>h}X|*nD4;N9QV;dPW;28-yt95kKA709@TsMZsw=eo(b>&
zsONofU!cXIyzJren$5?>tCk;G-_?A;%zRT+#y9VMUl-fgG;j;QZVGj<NuQ;k8ZS_H
zWt!5FPGO(++#{VQf+lXX|M9u?*QJW2k`K?{>aM@_ip8ze;z@LQ9<#WnQbX;biC-la
z@J;*ufW7JYrCGNwS?`YCyTg0G?cD8UI?JCbUtzH4TKFOT$gVwB+txqutxKEt*;6NB
z>Wqyy)`(75Um+Q^@Aw6-Z$b_%lN+oi#|eLUmikfr_<NDp+tlmki}*E#{$As)wEmgk
zj9Lw5w&M=_KE@wAFZLqfzHj>S7=M*n-G>U^bM5%a_&rVjZ(aMkkJ}IGbKb1b588K0
zVcw-*$8Nq03%0hHoHR|bS;&&%%W?aut{<NN^M2k}H>v)`)%AWK_y6gzQ~uAeA@=;g
zt}KQ}d&D2jjr;yU=hm*>=U3f~yrblC-`LnqI$1u5&2Go{b;1wBTWW+K$C_2&7mBz2
zx8LOLvxBye-5!)BF!J2|dh*M<;}4##O<!0eX0>d=n`zT7pZT<DVG`Q|D?b0IiTgzV
zSpR5Vc6@!N;Q36^n`x0bvrn_MDLgY{(O3Auuz;gvee197O?4`fui}K~?tdshhjrh!
zd36q!ER2659=6v!N$c;H`nP;l#eDZ4)r;;)Tbe4U?A;mMv{9%o=2v25$NK{>e2Zr7
zu~Yq_`6yQ+>PK5`u<QOQ%WtZyFPh4;+fzZt;IN+ME8F;w!H2Jj-<R1EvGrzJv*%R>
zk24oH1g$aGV3^?Wb6a{}uY+vo55WieZ`N<>-_kvI$M5u8eSdzv{XM@;YyKhK?4V0F
z-fRBYUVV4=cIJYUsg{;!68fjVYzt%FuC(kwLtE*G{6BwR+y4-h|IZ-3tj7Ms`i3v|
z9n-h%liPna&$QM`-Me+hYNrJgmgI356*(NMmiJ=#3cC9cZ?kIjHt>I_OVw8}-_Yhi
zCdFK__)t_|$8@H<>mGBcrrr^M#=gw0D(tBU<JlkFht5AZ&wleoQ0&^v(OX(ht=wSt
zT=%m2wOw8s3;1n6=pXK0&swoO`pV?czDK8n)ABd}RE^~Oboz73bH*7Z?*o6_e&}PT
zd~NYZzV21Ni?UC!<t4HUB_yP?2oxT3e0fFcOO^)nv1dR0ANvcmZ2lPE_jJdl!%QEd
z7oM5%rnvFPg!^7!D)-1Q@_q35VQ`wb=O-5>Rht#;ijDui)#d#99K9ugy(j9A`p4t#
zqHApoA0Kb`JE_TgB>l<x1iQo^J?WRW*C>BjxMjUyzi`Fm==m|%rX;CP5SUol(=*AF
zVNMCNt@ZV#fev!aAK9HZeN=zTz4*nirp-}zGk?n`C$^t9WRVYWuy4)Xewe>QETBH^
zWxb%9RDyJX*BTYqFDxk^Y&rgDufDQR-frQ?VBe1mu1pm>#jpKlo}c%J$JXBqzyGxk
zFLaOzKm5M!R(?Zndd9NqyI#-#+3Bmsh{=|%c%E_I)>D1u%eFrbKhl@}_@t3GDO0D^
zJF!DQDQ1H4+%*<=j88lfsFK_H_Qe8ziP;bHkJ!3h|E(YRqvicFgBuS+Pw)CA*YcM;
ze9;f{gR{aLzc?;?nQ^@EzR9Pe&-N?-HE(|&D=NZxxFY-EpIz)nbgPfNU9sfkDz@8s
zN?OUM+n#Pd@9*`h^sdQkIk_uewoY-mK5f$NPL314FTyR}_grfgVLbjr`@qH@$p_9q
z?Ed-qO2U*=s&<jCC3UNN=H2^x)cr&Kk$=)VXH@7P+Z=J@)bD?13zoFnw>jEc-r}v+
z6=8fDrG7~IWQ}X?`X85%6lYH_TVf)RtE7@pWHryN@&H5nx~lN_$MwuF<E$^Xwk?rV
z>FvIK$G!K%_U$WdkJqnYDY+8B-h2GR`A4o><ZoWPFE}UjK~7j|y@CGrAbYlT_Yc%D
zT&`hXcA9m`q}aHa4cV(tx=96D@su&E_f^gFErncAOa5iFQ#>8y8bOOsh6_eNl6!B{
zp7LY7-}rBtADsT_VdAW_Yp3=*9X}USSET>kLGCCS-o_u;Yx_IJ3vbq#ew5i{5SnY3
z(P+1BDf_EKo9p<ZZPy>lWwT`DNuK05-+k(hx|RU;QZnpk9ISfcia&NADxatD;p)ES
zOWOY3xVZFp=(dBO4J35L?>`Nbd)@c-#gzbd)epaqo7q`?coRNhm+BWUp-;E%P4e$N
z{=IyKeS1Lm+Vwgg;}2Cguif6chks>#((9CIlMIg-Pd<5$@u2NN7J0{On=c>#&mdBw
z_#``?|9(*E<8zl!CmvsaLZ+_yrGwmYP#5-(?tcb}`ipAU?Jf8JI4b{P`=7-B3@)={
z{yh87@ECLtXL$dAhC}y1w*M8%E&e?FKSNL8e+KUQWAa}-x1K+j|Ig4t?%=37R0*H?
z-S><VTYmVjZL{T;Ed0-~h&62PKfND6r2RKJKJ?Y|Q<<_+SoEFMyYm<HuQf=2G>)Er
z{GRgWAH@%E)?78_EcKr9R4LTHWd66R`QNtn?G$}eCKiynt8{Dcs+{fHIuko<MI9LG
voOZrv{=x13L2X*@mM2Crr<)7Q75+Y1{)_Pj|CP_Nz9VOjgNUK2`u{fp6I7@6

literal 0
HcmV?d00001

diff --git a/docs/_static/storage/storage.png b/docs/_static/storage/storage.png
index 95b8e3724b7bda6647ca4d6e4f8d8dcf1bd51f87..319ba1772305de3f6f52bdd6e554ca40c9afe482 100644
GIT binary patch
literal 41066
zcmeAS@N?(olHy`uVBq!ia0y~yU<zhnVBE{W#K6F?uQlZY0|NtNage(c!@6@aFBupZ
z0#YM9(|mmyv=|r|I2f21g&3GYN*EX!7}_%#Sio!s1~mo{U|zt8kX^t8lVu8CzzkzE
z?gc5#%Wj&>z`z)p84^(v;p=0SoS&<gn3A8As#lR)08+qUQ(;w+TacStlBiITo0C^;
zRbi_HR$-M_Yy}e5S5g2gDap1~itr6kaLzAERWQ>t&@)i7<5EyiuqjGOvkG!?gK95I
zNwZbTC@Cqh($_C9FV`zK*2^zS*Eh7ZwA42+(l;{FElNq#Ew0QfNvzP#D^`XW0yD=Y
zwK%ybv!En1KTiQ<Zemh?X^E|p638M27=XC5Br^?RQ(iIDTgmyk`XCv7Lp=k1xY;1%
zL84avMVYC2C5a&O?F?<eDpACs_SooyEJtz-#HV1<AQv|~E*pJtn1I5^j!QD`Av*(u
z1B0iFV@L(#o4J(>t_FYq-`a9&ikjvWO-1J*4nfIDJe;1-<eVpp%+v~b7G(5ALe5xp
zk>fEV)j&a^sa%pqr)RwUUbW_ZzWwdzHRqnstA4i=MDIB-|NPzN@3$^juit$=E_?mn
z@~Qj({mK@tZD|k?U~+Qcn8L%fdYX^_)j&lB7O>cVSBFXdHBipd)&`aM%--!)+7QV}
zHIEn}3N>_DRDOD8@3|Tc;ZJxX%+CRqbI4RwI2j)Gc<ntdFuy@%euV=ESk8;P@oMtz
zyZv^*GS1DhJUv}Mep^mtj&x)R@9OC7d1q&tMsLa3cr`3qH1_f7oDb^rYmBn5tvPT1
zKgK3lE$pe1hr#WR^ixMUX9^#mo4(_)YS{es`+j9zSm1bemg#Akq`<Vs18&l}S5}9w
z=ePf35Vm-JY;xV-uh#{Y-L~Z1G~$+-vNVQ6r7ib}@l&bqT>{4^r>`=&9<n6d^33ns
z`TJuFkBS~{<Bi^!#Cmky_Ip*;@AsC!y|q=^BxAziD6c&lB`@^%{Yd)t<>fNbwHuE~
zrO&VZHbK!jEZ%nCt@<w$d}o=gOg}%bTU@_wja#o&?(J=B!`IKNP?Os7X_;CN$ja0P
zofnVK8Q)yHsd!%Z%+qUcT9>`C$i21Ye*J&j=UZ1ev>%wAzwhSW>hG&US0B2(#z3H5
zZ)FElGdq7=>DB-5Le2B<)%^MKasQuB-h!t4BxXw`%+}xkXVZ%+#d#+`PdGX`J?x}0
z*TSp1QCm2AXZ&1U`1qJ{+8K_efhnm92F1_LwDC$e*_`f^um7{rXw`~6MuDN3KeJY^
zEjw#^{m;+m^QVSS^x{)^aHGvMxNvR2QRBviXKtV0@zLj@s{B6=4x42g%HH0p{eHK+
zQkpX~g`?)%&2-;cCXE06=30pgstK$6x!iVsv+$K@;AY{=SN!e&Ub*J;_t#hLX?~sQ
zT-HGwAI&`Ey>DIYZlkx`vaY^rm#@=!b6`r&+xPqb*R74(D$#m7fB#+$hXb~Wo6njp
zKA2JMdhvma*{7@F@kgW1g%t+=@a#-~bfD$q!<kIp`<~CM?)$mA>gy}-X*!*U4@}9C
zKGq|7*x^BF3ztN_fLou8XZFq8lP7N8(0nw^khyVkNaE~?o^Kz`^a(h{?CVpp?11`B
z_g<-^^?v{V{WZUtBFywlgsbt~T<gaXU*2xNf3N2A+14!QXJ7Bte!nZgFJrO6YpT}F
z=R&FH1YcZU&Y#*RYi;&i_4yu$k3Q!V&#pT#<AaZ)zv+h6?aWrzPh_IhC&>1lnySrW
z@oK~Gce@{FZ7F$q$wIy;VS9r!w@ktT`K?h~vv$7Pcj_cxW3TzW8Nx=NpP%ple3#?V
z%r|V?ClwuSOy^qpdGEJd**g}#-*!7ssCw#=#HU)*8Z&Nf(F~8;^YNJU(^E6jA6<{H
zm!0i1+bmb*+oztIjMBBy+tuQ1gt_{ZoCQ0W&x-_oTKKB*v{-bGU{{~g=l%nI*6%!)
za*OGlsJ}Y%`iWn%kKB5tQs=o&ncD8~_t#g=InA|G!(EShi=7XVIV>|xfPJ3kk|)l4
z8GNlzPIrz74Y|f~<D<}Se%04bTGq;LeydglFZW9gU-HS=G2!pm>+Nr5=kGh&vLnFc
z(M%^r`KWxEie(cfz3;dG$1vR@U$d1jx=H!1mv~v+<2&xj(te+g?wq*vvY9l;oSesZ
zQa}2vaZ~5B3HCTCzWQK5GPnS8THqou;feOd#xV0%a3S^Jzrchi!rI^xqy=y3!sxm0
zl2rcJFi2^m^79C@6S(9Fh~n`0H1%4=>g^D|!bx>GaOua<c*)6O()umOqW6k|N<$7t
zPrHv`OC30-h%rs7S!#Ok)mlhN>G7$P9qPa>AP27f7?uxlpu?p1|0^D>-F|P^>vg-Q
z=|)eJ^{@Hyus!9u_3k&PLVGy={CM2Y^Q&8b-;C17@ArPcmw9<v>fBh#Zox+Nc@<3C
zPVRbodAYxO^_lstr*@TBp4K?|y*|KymdVQ&VZRyH<#}Zn&d;-*{bqC8*;9JEi$s-~
zY+TMQ=$Es#dVOqu*{#gQZoQ|hb{mN)g9;0~mrEvZ%KIr>euwe;(LaJ`*Hlm6-ZQDD
zvN=kW6I>cQ=m<=B(tXrmg&-s-CYZCR{5-Vq-NnVn7dSS@RX&~i_xt_&&1cPSr_IjQ
z+xz9xRPAu9tTLAK7LR*$x8K?1Ia%%boZ@p`qS`T&Sk?UJ?YUp~dt&+Lv*z|+uLR!}
zd$9Su-R*6;w;7q)EPh>&ub(@kk(qs$1Z$UIqhazfgT5IKi9Bp`7rwu=^mG#|cTD`h
zACLPdD!aei^ZA^LY{3D>{5>Dryf+<~r0Sh8Rd?GBrlzvDw|r+B9eq^)<6(QBthHRp
z1;_jK|Mwm}&?Rzjp>zBDeZTbrI_ks(nd0mJn%aE666~L7d_QyfT#4^@%Wm_!ecJuu
zlDEEW(FsKr)BJmX?*IR{eZI?&?f2_;f4h~f`~KVQeEX6Y7vk&xUd=FA9kk=Il2ZPj
zk8VlxYCfHGP*nX9JW0Ri!p)7<9}cpAzgvD^$}}q>G(LOnR=xdyJ{6z0y}n}R-Kgz(
zppsF(;z6VO{F+b3=WUuleCC$2tNC$|UH**C)z#tallv^&e2+}ijjsD}kp04&&FAex
zSBEV%Db7s@dVOo_>)ZMJ&rW$heL=xR#Sb5k%NsBA(5VsVvv|~DmV2v5+Pv@Kzn9DB
zuhD<=>L9;#?iRt<t4gm$roZ3&J?&P}-J0)r%cHks7}ovyac-{lVLuhcS2@ChYBp~+
z9RASr;`a9S7vn1)ww~#~vgzHf*Smy7G^|)A1by(?U-$Q%{r^8<YolaKuLO3B=_dI;
zJ|gNQ7|EJ%aDKz2woez`<xB6T=pKF5zDMWm?=LT%IdxWcD$30g_O}r{ZmT91Ew}gA
ztJP-`ve#^6TU{CM^pfqL)*bexTz7uz#R&fkH_ld-YF?BUsB`9L<^9_4H~pvGcv1Fx
z?e=+pTmE~rYQ9y!v^sKg+7X*iCzOwDD01Isn7ffXmqnxI_4@sGn=aHX(!aJUw414r
zFJ=91uc=zXYCap59I88><#NeUaK4+-1)g7p$7R!f_rBZp`bXu9h3&fv_djUj?l4?7
z%Q&4cF)VOhyoY{LpUo$a?wi@`_X;HI^uJM`9KPId?zNcWv*+!8pGo4g|5Fh0bm#MV
z%^$;JORpyP+g=k5J>XE&rp(k5VOsS4Zh88THCyM`eB!h@aO99kqGA5NpK6AED+MyY
zzP_HmGIwRgCB<C|4k<##uh(uDk~2(dS^DYk0cL)I;?HNzqj|FR>y-W&co{ytvO4_y
z#>uBA{dbUQbGDN25&V}Z<rKTwL`5>JBY*$jYdrJ%ZNEt@>QkMuG;Dp`+^u`GS@V()
zo^#6CdNpkGi8fy8O*ZMW>285xf4AQ$;_kK%ORvb9Tl?)MOH3JSmsDf0pXJeB>q9;J
zbmI5T$**X8e{XN~?t2RlSf28?|NG_V^ZE9_UMx1ul4P8$?DO@s{{9VGn<T#2t2usg
z4fF1mHg9W<3FHznWt!19Lun0ngifTY^!Ws>`R4ieJ|34p?{qV${7&KVZ*On!K9@Az
zDJv<q+;D?{&!aa>eXMo1<?ViJ_GW3fxc)Jp(CvA#?lGqtuX^wNe6cpY_K48!iwPf8
z&Unq4Y{H>??Xg}b!=G1m`@Y|+ZrPZ)Zr`u}4_vb*O&7I2RuvTavi$wMxy6<#dtR+t
z9TUgeCCfO?bRBm_PRrRZTM8aJ9rB%RR=V;`WUrLzrum<rpI85I!I}SOwLuzpsdcMQ
z{hG-)6kIftcCL!tyiD7xagp1B=aN>3LMHd6d!!yK=vKVJwp(!ask-kstxIg2ZKM>t
zwQ^R?aK4*#ce(!A5C4}vyY!~sW&PbM8;9m6j~{!Ra;yqn{p^H~{+nxSXUk`<l+M}E
z$hyH`#zJOo_HXRw%)6XKG=f+Jycg;oXF27hH6d$}VAbQ9w%=|vuifWRR@M>Qb9W2F
z?}wsOy<VOW(a>YL<ZHU^3+Iwwo4InDuehw{{&IcVOR>(mC%^st5Zh49BqMPm!pdIt
z=PBE3Cv0|{y%QRfwnsYVMp!{k@a(GekN0an^Io3w*h4JI-~O-3>J78=c5y!CZ}%@;
z!fY^+QOsPY{DxG@?xR*GOpd%dUe+OPlHs5-+iKrs{m)XxVQ+73?UkE$_%rKE4)%HF
z_bPv!dk{6pW6c#!=Y+|=W{M3{FR>rlpU$<-@$lzLHZP8zxg3X!PPo>rNsx$fxY<?a
z7gRS>Re~>m{xk)Fr>~6kp6TrRIJ;=)ht|TxH-RmGcUwlh-~4p*!#3$z;lHfk?`eL|
zysq|oO!3*&>9K8!kAA&gUw%{6Lxm}4%LV;2Q`eOXtNWFt3r(o(aMLp`Hk^>iwmx|I
zIot1doFzVshq3I_WV~LMW>R-3e4PpFq;`fj-xm4eZtE2d#&dHeqSJ4FPConh$K(Fp
z`!-+Q^m5tkO@$eT+YY5o+WG6%>W_kZpG@*T(|qjW^c;st`m-1A|M95%X6v!)$+BrN
zH_qF9_L(pH{Z6sJ%%2(a3fvw9xtCbiDXD#YdU*QWR-3-)2!p)+f6LCq|B!sY>-9Pj
zjlMH!DqZUQyQJQ+g~wH@YL`^BT|EBsPUN}^KW5JUP@GtGC|8z=FWE3L(d<BaW89-^
zpC_W>F$Xj4zm;;FnPWM5v*A(ZhN!4KmKpLFF5RpDUu*NgfO{i*&x*sz2Si)c+FLt+
zK53KAYp^ZINepV5{y-+<LtkR+*+6zR$*x1^S+g0B?C{+w<YlICa{Ac|k8Qsm;@02b
zar~#SeCmu#*FR_ohO_F-tdLM<wX{8~a`e~VMcsNzovGsPGiEXto`0;KQ}ujqxln`h
z-BW$@f2r^@ioIVxzs_n7!}$jnPJemw{(X|1ymgrlANQQNhKAX^7Ke{Bvi<3buYPOV
zF>&q_qX>=%IW{RL1oWak<N{tzbZ)pRbeUtG&K%*RGg{KsUgu2j@@y!6=VR32P{Aga
zWZzmmfA^bB+}b&F-YiI|+BnxqO-7<^@!`L}zn7~{$z#~;xzMiA;%eZ*eZ}Y6=FTo@
z-Qd{UYuBU@d(7#m_|Xm#S7l|l56|b<w`or(c$snIPcy^rFsr7BqvA&kL|l~@w~K~b
z^jR%ge9(jMe9ga~&tGyRiORP>oY3ywDrRdj>$7J#%Qn`WcQ1oP<7aU^cp-9DG(yBn
zSn{hC6Jz-U{V8p^B6bTsyLen3uAK{r5YdEmUOQbKCh2c<Ufl<63287*s#&-}BM8!3
z6zXnJiBD<|lNAOx1RDcbRDL=o1}=d#{#3*mJ@*~qTWh5TF>4`*$EVgqi#!y;9c2$4
zrb#uOvQecz5VM>FCp_V9b)5)pVnwj1{1md@@@fXetQIAOlj2<6N|45*!bT2{PpV}Z
zR}CR%ae!PYBIXL|JUb)_YAkuOIehV#btN@3P9HmTOx12J*Onu*j=z4=Sn*efgUL+b
z(affX_$5=1HKucE{;cqwG4ZkKCMDVZPfTxvlyBR+I(D~N?&GzaZfx7IL@JFrv03PL
zUw3+vsY&63nND7I)rDCHn^-qWO%2NH>2>|$oxpnh_`LOx7oKa{?!wE!DBUq*zoy2#
zPSN$p8q-&O2+LpS@KE+))z?>viN9Yg?&lC$CAsRlz>hbZ&rhh_v&=i;Zt3-2U7h)L
zzcQQIc(bmo_;^%2K4f*6?zXo&t!;mQy<Wc}*hYST&dp60-1Z-RYIYc^*#vvc?SJ|8
zK{LPGr@M8(-+q*wF)5dG@qq{fr9!dl9zNroZZX}kH4z_=2>VZYlW=s&V;)B4g9S$-
z-*l!Y9n!tl1L}pT$~(-dd^WR&U)HMR<)x(?rRD`?^ZYR8%5zD&_xEwX{X8)~8H)`K
zV%;;Gr?fv~v~zZFHIu1au~8|sBVDU0Zr!J29qAcfiT1I(%UG|4t&K`F*39wt+EzA!
zz22>HqXw6N!Hk@3H`7dh%*tM;*~O;5X=1~90RsUZ_dnJe^EwYp9Boez)3lJ8U;nT2
z@v+{Y;#t?%$y(R5=KHTSWjNy~_eYZT*rJGC_Jxm*goek8rvLi*__(<D?8#UB64-0n
z8X7gG)gk)Ic5KNP7r8$3JpaTx`B={(K7Y@ve7Cl}VW@XoxZ{^y$_W7$v(Nh6C++|L
z*}Ow4e$QIgEQamOH9U@mr7kwX6Fzn+o<HLB$xxzOTtDyTCRJg{`F*n1ezVPZ^ShgP
zf;2<8wkhzOW3xEXTyI!b^Wnk46|!FzRepZ9Mx*}c)9FcXTsLl!tYDN}!6<WsaeqYA
zyY0a?j~55te0bv%sIPlt%j<phzu#`xUbiFZ#)ia-Zo!@Rs$Rd{`~BVzpT~#ZWr(a$
zV5*p3EWxnx``hjJgF?>EGOhmpuJ+?m@$j`#TboqHwSF)0oLu$oP3Mzkj54Jex3}r4
zryMcfDa&`GZL=PzXX!m-(#_k_mPIOg2X?Mi&0pG?{)wUVPUW&qsdbg7|G!$jo{d-P
z%9_Z{Ha)(Y;f#kK@Z5STyFlX7<2?pRM>x`rHJ&D}eY20*C_uc`+uLMuSGv~h(3qbe
zpUjkaS;MEvU9hM2+#Jh0#fNVxPFONOQn|q>WVzp5+44Jv&&0Q;TzAZipKY2gR%i8S
z<{Ji|>D=2^MsI(2&icK~=h<&BZM9d5vk_+WbDflQ&iLch8d1jBN=PPl;8^0k;DGwg
z%ckBRA(>u*MT?WsbDtMe*imRs_7R-$r2FN86_F4H0iGNlpF)K;XhD0cNnH&p@jmQs
zXN4f;j~A$8I;kOaBSg7CFpJ91B4Yz?alJFXj#^7o1R{18rJno}DtlIvDQWf`2hEey
znI(4WK3X!_?~yfo>dKBw4h(sg-|rNAFJAvDRZp-X*W_x5On~RUuH+r3&dfB9uleY@
zuQ*^@mj*A(gK4w#_x;@a{hoI3yH(S7Ix@61f>rxMRNvT`ta)@_alo7|4Mvs+V)OEL
z{`>XXd%^lwN#_J4>}o7>7O!0PsNvPOO<hSHPa9Xio~{>rYenE<RqtsbD}$CUQq`)z
zBv@b*yf(^ooy6ME)nS*a*Zh0R!1!+Z^IdmA8D_Gt*~3C1UsKV8Ay)CO7k=l*>F{^h
zGRfEdShz+&)OEATC+m3+k2XZ-@7-!*wKgtHgfW73qr0Y$#jGr?j%6zGtc-u!CkBUg
zbS;XM`dMqY6Ese;HeBj(+`_m+DvRBEi(G%`JW&!oA;ajo@61N7g-3Es^X^zoH%L1p
zu`|}vM1;dt;ehe{x?eAw`RydUR(dEt5NPnM2srsS<<`ZJ#}`g0DV&_nn(_biqcdS*
zM`t%}a{9wB+j>aAU|wYI)~hj(HY6U7iC|O|=yGWAO!>g$s`=RiVvo;O%O%HJQi4M`
zl@nw>9ha}S;JH`vxcB+I>T`cX95`468f+6Lg@*q<Ub1$jT0_vzx>MUX#}xc+mdIxI
z3wu#nr=u?@th`-YUbt13=|fK?({2gwk{1^e54CVMdi)h(YCE9VE@M>q=m>+<rw=>~
zLF?xKk8qiyy}ZLPn)Qg>j0a2fnwM87%QB_C^6{N(Q@P1+>+(a3Bm^1^6Z%6mQa<<`
zRrxFdc4*MD&1Orw`JY{Xf5}-vHow4Trt_(#?d?@DW=v&Owo~+Ety_h(FS#d_{%=3@
z)G6V#?sgfA0)@zV_5XgZ3|_u1_jcL(z26?C|NZs#bZB3PH+y4=4Kq{r`n_c@A1s)c
z*5~l}Sa0q;#qMRZ&J=1lPwHi_)#^B~z_Hn3=6kMbi{C75Zm3vRa?GmTR`m80E}uy2
z(-D#yk&ud1sI5UI{t#QpT|r2uFaiy{Fe|8XL&h==2~2q6eWPKOH6)J&urW=lNo81)
z0;x$oB$y`Ea7qW1x<d>*<=`-BJx{|F9Y__<>Fh8`KaqLWJLtfYBxsC?al#}>j#fwn
zRm(XJS7jlFX)rT-?lWQt+61Y?Rd^UZ_qFh@*rg0HELB0_WH_6Hml(vIEs6>!r*B{k
zEr$*m@vx};JjLK~65`GY2A~Fk*@9Pn5W}W4HmJl)GH8x^nrbnfu(gqyeVXH<Cp-c-
zN-q0;zgL~V&v=1or@#+=rbm(=9mP00ayK#PntD&&|L>Q!X_z4-W)xT^bv3M+>nR)g
z)f<v%4uP6-$6cYN@PrT+m7kNWHock-iMbYKg_Gia-AT|ivl5hMN>i@tL(&X~yTc@X
z8F5$z5F|L^iTA4$tKuPc3G_9n#9QcfK?WWhmw-x{D-%QaLz0XFsIV%C5rIy;c!FAm
zE0tH*aY9nQ1=FOOhdVT&b;qQx1FuBi-^kzp_uKRN^<k^S&gy6V`SCG--_K?8$q$wL
zERt^LZvXoDc%Q6w{yiJ}V~5=3Yrlj==RQ4tzyAMUZZVw?$vSt|MsGjXEB*Up`Q6g%
zlT^L`NN~?Dx#Y<!ZT97H{_eNiQcq9&X(;yY;$ru`RbT)7n4Q0`^5>_gbv>=;tzNH*
z+M4zAqq1Aik7u*<|449ue|NWfZt1m;kJs;hw@be6$HD%M1^es%UW>}swf}cYd;OoU
z*W>?6aL1QibX~pvU%%s?Kc7xZ_Jy2cWM=y@@q)w4;}JWuuC5Y~t5_)SzxT?!;}I2a
zw_bmDd;5BM|M(XZ<&z(lUXT4gr}*5*#|PP`{E^_!-|?_5eO~1<dH!b#?~h09sQdeC
z`P{Nu^5-p#X4UnyN}FbtJnB^c_3;Cn%%kHGJ3J?=9hWViBhSD8U};@X>-Kw9uk~Vg
z{rLF7V9uN45f!`NZnOUL;qc#zfBHY(e@NEZbJ@=tG=f<72Q)mDc}d0o*rDC;cD-g~
zmw9k}|EE*h{<dGI$R|Iv{dObye(m?Kk3mCjD}$H+G8D_Zz3uJ1>US@X+y8#ES%2@B
zN%F}L|9-!}zx4IBx<6AigX3zyhT0!H#4cY`&@HC>=Le^-+L?K_xBE91Eccr`%RK+x
zar;jvl=Js~ohG0B@Z4PM{LN?0>i#(M+nUt>tFb?J=(t?<8?oq|hsW<%zu(&}ru!*b
z=g!{h?+3KC%kNb_Z<nu|;Z=D`bGb>zg#-QawpCx2&#!xRJmSZf%l_+wm-orrXI)vL
zzyHsr{*49e<Mv*QudlVQ`|;u7{o3zy<&z)U)&9D&BJgnk{%5nY>wdjdw?B4BJf^@g
z!AZ_2g+sVh>#6GWm`$%%{l8Gp#w#_&i;Z_m-JkdS|Jzl3NU%S4=!*B-x<BXb|JQuI
z8vff*Y?=O-6aD}H{eIuhEB)+vghY*lrs+N>!#!6g+)aOaYHISao`>~Q`D{KMU~Y6u
zSau<8Y0#ll3#atkev4S(^KYV)Ca8L5Q2l>w;gmn>>>4v&oHT#_=Ls-sZ4LSqFTNm!
ziFfIfdQFFuUu3;b)_XJvJrwlfyw9#77`t*x=&C>81Nr_R{;+hCVDR3m()m3fw%@Ol
z_KMtH_LkwP`urLpubn5=X0vFB>%~ZT-Q1FSnc?Z3oyE?Ypde&;+9PS~q<MQwrtkuu
zxxY@{-dFpZA$7%r#!nj#^Kl2Xb=hj{E_*ArfJby!z`UwgD;ZA(OyqxhGkw170-o3c
zhnxHB|1+e%C}Q_|#60DRs@+%TSVpf~lfB${-tq@rIv(I<z!}gKt)Xhct|8eS;B`#T
z;pC6DphNtOUKMIngygsb4z(|w@*%fD=pNtFC-sUBC;xkEsQ!;<^!mr{tod_3Z@{N|
zS+A2e0t-^sc`Qu{<DAgy6=P%+%&Ou1)OO05I>u8MuCh%zvx3oUiN%})i);ku|2$v6
zf_tSbU+>Sy{r2y6Jm!=8R&0Ft(o%2cDLRkLZs%-vP_bv`k~j3&mv(m6(jYbwJC2AW
zZ#SLRTRDZXm7nR!ySVDNQ@u`pvKKh1^zHyNKaa~N<50DY9o$!c1_U$RjGWi<s%GV-
zZdLyk=i04AH72KqtT;Da>v_bi)>k!+OySYGe=?6<VFC>mU2vBPmX6$<b~fPDmA?E9
z2iX=*S($KcZM69mqtXVmdlioZPF-<KZ<P0%xhusnlVhpNbM6P7N<v;UcO^LO<OFf2
zbRH7%nz<{+nUfpBO%nH#4ES-Pb(6#e=`|}F4!NsvWjJITo&vGBpEjIJnZnrG?;-NE
z;aD1&HRb)4H%FGIxi9q3wE+o8^sq3-&h)VRFoX4ud0TtHT>gEfCSnI>2x>9^i024M
zQsBJ$^T&U#fTR-6sSbb6vuPMUX?az1hMDnHL*a=a<|@UBJ|YWyrkpbN5m`8e@$Xcl
znHo--!8>2iOrPgiupo5vt4EXl*0Jw;_Fw1w@0t}B9t$jsp9P%yc02$7`ue}!rr`$+
zUW6~$0I69x8fBCkH2>}Se6IR&ueru#SGH(%ciGaAou~BYKS|bRom+dfOLRr#jJ%s$
zw8K-CAnguD#R<YJ%Y0|IEk69^<z>ycGm`rfXWL&Dn7VG!Cr>-C#Q(zVjnesh47pj2
zW0ghpAx#%21#T9BiX7>OvWN8;?BrLPJ-lFaTKA`uH*D;Kqp?Zh!@u9}<x4I&-cVS-
z>(weAX@!&16$6x@LnSkm4#>r{-I`bGFiC$I^U5@69i^)Hq2IV?VX4nNo1b^f@ALWw
zK?hSf8W%Y);F;U6tS~!&pXKv!JN%TsT=2PA8o>+kMx&7X0-m|=AGXUsi?LnqZn-P`
zk;ls4&|0&YWry?=`~N@BpO~n;_;zI5lgEBGpNeB_zj)1lv1qCm%un+<3QV=bKNP&v
z+jU7JTfKZz(P5+MX^C%r_V%dnidhQjlQjsOFlM@Q_e5)f%=^b-J2^IrF8#ds@3-4A
zoA*AQ7VY-reDV5_xv=;s;V3woaeA8W&Rw_G-SF|Arq}qR{9a}Idr;r8$;}huJO!3V
z%?+{X@>TXbvQws<`+hY%-ch~veB)B+q~Z}7#_HROUk=J#|D<v9xUsYPoC2p;pH=lX
zgWHXux;?42!M0oM*QLqX=~JE@_Uo$7cyaoK-hR0Jqvi%%D~Z28nNyy0`}<!Jf;2S*
zm}V***!*Nqf%Gqnou}uWo&5&INCqSQs5cj*Ue;`sGBS08v~09w8DG{+<=d!rLXqi?
zyftK{#U9VeYHOpn=iS*+$j7Z85}5zzZ{)5L&AE>>gw!**uL@n&X7OTS^xP*T`C`%g
zXYBGd7q(_!mo!fMVQcyC$K(9HU$2>7kGZV0+-K&dqNk_&WUYUh6!Pu=`E2&`c~!5n
z*YAC1{%L|UpQLS738)ouV$ET0F`XH9wN|M`8(pupM!2opmT<H1$wc?C^>KTfTs3zK
zXy`C)Diaf{zdBEl>5l4_`*pt!KPw4YEa<u})_2_JVEO&p?~&<qpLXr|bV^%)=M$mt
z#?KmWZcZ;w;W%0H_v`h~D@?ZgotbUEzWn{Y$!fky)0?K76<_)%9$zzYv*#V__>Y(7
zRXpPC`t|GedhNI=UII+5t_ohC)^5L7H9K#Y=Ka>(MOorZXLi1*@h~-6616p}u>KdH
z^_v5wuRw#Jhu+`L->+-cu|wqhjpY8Pq8pyitLDqxepD>_LhCZ$*-EVKe6n2%d*1DO
z-L)<_YlA=sGt+td|9=)ZHvj0$@R_o8^Y#1lJI;6sG5vH*xVb4cM8trh$T9x>Jlpp>
z9`~KM`<?UfQ0oe>oT&?C4}FO_)Sx&;C!@XJ=F<t|^EQWD`AT11v3%5_{Ow+KeztM>
zEk0?pHNngMbl&f{Id}K3cl-a>iRizWU;8a`-R{)0#~;TmpHt*@c9y9w_kE45?{|v%
zO^db*=Fiyh_{Sk`{U2S=g8gkx>;6=TEOzUanp<+o6V%y0{hEEn)4kvC?S8SSJ1C#s
z=J}jr3-|8Wix-R4rb}G86JPhUHP%5yfBV6ch0$G1?K_K~+x>XZTv9yQ*KA>;fI?V1
zkEBzxCi`rI7jvuM?Yt&_Z`Qug=d1%+cD{Y|;V{4Z{I;}ze}3wyY5bp&+_!O+BeSQE
z<?^{@Qs?)9+Ji@!rPNz9f4*AnepW85X<w_uKS76Qg{vAX6;2%%es;~B<A@BSi{O9z
z6)rws7W(dezwfs|Le7u18;?n`+z@PevwVKtt@8V|Cr{+`9k`vbxR+yo;W5cDP{a3#
zf6BeG+qv!yd$o@|No<#OlW1h$yVvvPBG+#Fe?Jayx>Iu5w<B6bV<l)kQuW)d)+H|*
z7&e4#QdWyCV^Qy|{PXd+uIY!nd#lSe1kD<MT$z8PBGa<HxUeqs@-p73VXMP<Gt;M(
z%qjZy?XCBdyxniN{eHLmveT8Md*O+K3?kBbI}}-_I7nFf>a}ECT@@;AmgAuxp)}#v
z`gr?CFVze~j_g|?TK9JA^+{G)S2VnocgJ$Mx*a-^u=wJ3fevY=6E;7lz3*sSD>QML
zilTr#OGPnv-m~UE$@%Z^Rlf(#k}#i5?rh&tG*QF<0>7NijiicvZf0de36D<0lgDlD
zZOe_W@##4D!Q+)^c+5jDhe{>3>)uH>TAUNM<lp~yT)y6f$wVt??h|Ldq_(}U*X`C3
zmDVZZ$UOLW`#J}4haSb{g46WWOd0}3)^2p`mCCs;$=D^eEbxg5W0KFOLRWpMC4vin
zUR<?$-L6x0_v(JX{o@k9W&Vn`6Uizjvt$fCterWx&Y1t=+ScsrpoKP-_a^IjTs}9~
zI@)a?XdzeErmEN_Oil`U93G#J=DJ*&$E9eHduz*Nf4iGkM3yDky}h;du+|H%ud4E1
zoUa74CU~nhTyT<i>Q&B}#6Po9zw_dmIhMl8C&lAy9&WmE`kemWFG7ZIjw#-_kyo<v
zgzh;-i<9k*f}0n5t0t_UyCCe>M6r8WtJh|DX4NRP@%rt0*|L4<EH;~j0}OSuo=6)e
zwVdwQu*5T`Bg)6&lj7%!lFOX=83N$i)69SIe49$6yGQ(XpEbLELHWsc?Z`>z9Tz&x
z5j^4Y>15FV?>wv4Cpj=2Jk~x<FMgklrbXJB8Se76CH=PF9E{#>x$M^{w_s7?TTo;7
zZ$*kCo9(xU0{mkAyX~c>wtk%cu(^wk(N68n1m7732UDMGC<+~#zT1Q2oXDn~9K4@|
z7Vfwfm2G+VicwPa6P05#)91ZBwUOa>ZnwC;TbYx9$EW%rO?&3O3%Fm@{EOr;J`?43
zW~ztXb;tcCht}`=r8O&ifvw45S@DJI>}r1{Ja>7>9I^Y4c|b=US7Va9!mIs#Pw!1q
z_3pY@xbB9#>sx+>12wru2Oizbw0^Z>@v&a%_RXBTBe%(lo@m<JH{(j@?6>AVH5{&s
zkKcM}bm-NbDF;efv$bYFxsvp|%&ozlr|GO<G{eUPM(qh+Kc|1RpA(&NVS%H9xc$b6
zqAvm;=ge~9aTm({ez8jM!+N&}B@5k?o0MNUo%ZZqGMB@3e{0K4_SD;#1RpCqOBl6J
z@(yy~P+@LdQ7_@65fO4hMt|>@ODSK%_G$#N6nGyz$y9&Z;rB<UBcVsOtaF&O*Kg)5
z7alLUNz9&ZVrF||-2R9LD#=$oU_7$Qb<^w@K@&S(IGkXPKT{xep5wzDm+4N;Cd?~O
zPt|LfJFRO&Xi=M@L$mtx%l`JgW}bU?ZoZTjfBi_<$%V(>%;R^yw0XZp>8mRaOW&!B
zMg2+LRr2ys*5c^xd2_b%pNMr|*&v`1$Fito-=nuvqvjc~%y??)(NJl8K|jV;(DmHW
z`~F9Ln=9^oslCe~t(F#4_Shig*AcO_b;sE+b=oT3{8^l`qjM3n?Vk^a_1M;X7k^P_
zd&=*vbGs;`sE=W1$B{>OTDuDwQ_M>5798f?aejvf_lD~y)Rsp2u3fqPZrN;q=f*Ep
z|Nnmfu58RRnY%scPNIzMvU6c;qh^+Q?=|5m5Abu`;CLb|qhHQeW+{`?0u6x&_7m%s
zZcBRJe$df-%I6!W@+NIhjwf7<HGCUwm0}I-7feZ5mtxMBt^V$-i<P>#jQ+=^Pxy>K
zA2ZQ+|HrstiqZ1N`#LOK9y|6gOnmn6QH?_9!DAhYg0}qg#lLv}@$%Vw_$BlHfSiRI
zJ{k@kEZbfPG)Ay)OmRxS>k_DJ{aogTLd(2bi!xrr?Mj=S3mfJg3x4q7;bFm(9L7>@
z*_{7ZbJ+aK*cq^=Bk?`&a<5wr?3%GR%5LYr6l8h!Fv-qhofwC}oNt$}Tsy75U&gjS
zY@=(}ceQ7}=VX;mShAmd>2T_~iOQEb+$uePw(Z#_d*Y=1@6I`DQ}eru&Tdwz<!*fB
zr0^=FStY)(H+55V_p(EoW@&N%4|B5fP1@a8ynUvvS7%+?#3MhVYdN&iYP`Ls*8KRW
zEE@AuM*YB#OgW7e%}<;{59wdwSkYn3|M&NI_o|(21tI6w^L1-G9a_4K?S?OpOsRX{
zqiLP<UUl3&ab)q;S&5;=5r^i!&<OtI*g5x_(WZt45!V>kFvOT=+P7csbpJ6keEI5G
zX%F^vrAjb*syx5{)<eEBbE<N%1BZw#Q)tG<-|`$Rdb{5kz5UE;aPj;hrG`Jwhm+6q
ziwbP7p0diJ`i9D5IpLVVJ0<78^p+g?F2pLhSx$Iws$;g}55uXyH+AWnoZ|^#TFz%t
z)V`<k>C|umiJS*Jdpa18a7KxTIrg1Nwrf3L+AK7oY{ile+bh1)PYW(#`>@(&S&-&8
z|CrF9eoGgwoUrhDVeV|p{Sv%aFPgskq@5Hm@Z8_}FyG9tQy8pPujT1j7~d_X>$K{k
z$GPwcT~-}p5*l4$j4PcvqxBv*_w19-GF0=FR}`pcQTh4E-YqLpN5Db#P^SpHSjs$8
zj=;v79~zkX3^r%!M6Gn=U{svI$P~D5u4IaMr+|hi%Z`M}Zx1{3+kQF3t$&2A<3_S|
z!(3yYc}tlZe`&HzYI2y8uU`FLqT|WsIa4-<*=w~hxOvEU2K?A@VlMaj0-J{?7~9T1
zJ3BjkMNFy~las=7jt#ZvR!w=|5wv*CT)~@DR9L(u8Ox4uJso<AXTiSXl8rt#A&W}(
zS$y}+6=q^}aY$O<@L+Y8Vn88_%1^Dng<jCLgGrz%ni+21xuW3Kd1IHd!pZPLp3o*p
zmqA3BDR5H%>Pb6}&wZctYQime-$bZU<K*#Z-aFe*m#1)giGVdTDhBv)xOnbMdeshB
zm@qYT$AtA~wo8??cU!nZTJ4jX87B4nm((x$?D}|dh@t|hzvCgrG^xf@I`AuWpnDN$
zrs<gbN=TPvf(dBM(rnSIeu%SNlm%3OehG6nDf5E5dxzqO8_E51D;{;$xSQyBeflVM
z`q9zupM59Rt3WIeVEX9z;H>%mKj-WJ9WHm7yhtV9#Y`2_&1(?Q2x9qEKCfT>Y{jKt
zm-pMt*Z(P;_$V~!>@zlbJJ2xU-(O$3FBN|DaZFgBuykwI)vPNk0#}EveRXAJ@WDk|
z`oGuj|M%<O{rR8~o^JhpKRzCp*IvJFl5XYQ((AGHf4|-|y`8uFt(1A5jJC`t#h=T3
zW*X()*}*BSrW3!<CTwnavXM)|^SR}A?{+-yv-_11r+D7LoAu-oV^J0ZzMHqVWL{qE
z-me$CYt6nYyJs_vcK1|Nf4iAJzv|V>%*)I6em-ZNdZ}BXo#V=y$jO;c7Bur&J(=LV
z?OxSunQ3!k1E>C0vzEEGK7Rl8xazY~+n-L0&f9oYOhZ6Ae@|iA+0b=+6dU#()zhE9
zv-o-4_q*j+PXGD3zW#6b_rA#Ep>f7XjHkBe%6YZriv9h2@WAKK=k0?Zo88VSem=K+
zTh2`*{;kVbKYBVne&643w~zNoJ}%jPH7q)J^?v&osxOv-rcZY5={hgf_<sNYf3Mf?
zcUwBow%Y8NUAW)pXJ;dYN?%`F>((o!XVl*4b1x;H)zA0)&u6pqXZ-l#=Pn@l|1iJ(
z0;Py_DYG1liVqKdetzzLL0fYh$C{X(MSp&LbeF44xw<M;G9V{%`@O2{)Gc8)!ICS)
zR{1R3^YC!{=1Z;V5yo@t|NY$m_gnNo>Cay-`-7Gv=Y`!(?zf#G7GL>vYLkiD|GWME
zz45hQMYa2d{VWdNJa9cSeJ&$2+lE!=DwWxJrH*ubmJKYL^5nEiea1!kx*reutlv1?
zU9-H6PxjO${V6^ZkG3Az{eIu>|Nnm9Rw?@P@%aBwr}Zs!_E)I2CmdU@vF-A&@9*=k
zonz*+XgKP7@7A`2gH5&HZXUle_2#!j-1;#)3KH*Sg}c5KNKozN$hp03EibQj<~-(K
zkNfQ}T}&68-}&U!-Y3z!N>*CBnB?9Pd3Jiy-BUv8EH^eCP2s=(Zr|^BHw=p|g?#k!
zOX$f}G)_CSB6_=?&F0g3yPtHWD_Na3In`8s*7W+4W4z{fKCD{3?$q)ly%{qm)vzq{
zpa1Sj{OfCLZ&rQ{))qQ;>D)@Teac)XH$G`T>~lMLYp7&aj*I`4DdsHqs$Q@4HNQJ0
zlU1Be?5pUSaGPL9hHfS6>iV>wXE@Khl>9uczrSQ!=VU)C&c)d*8z*t~G0f}FX*Zm?
zAqdn%zISVK^2zk->Wu0=Pv&j6Te0Ym@`s!0^K&<yRQvE+bNQU0gSU<vi?VEp*z|lt
z`lV9Iy$7$QteElU!&2qonl%Cr?uQ<>o^0Xop8Dra-_xK^zH_anF5A3ay}D|R#Og^>
zhebJ`&HDP`Fn@Y%YlT|&i!Q@tzY4Y3<An-qVt1Pb3oJAC<5{j8Tw^1+<NdzhtHRfx
z`v98cw`dJ3Q!XxR=jMIN!2CANdAH|eHI^@)W=yy9_x~-u9_uczzw^l?`~QE8yU%r{
zZ(>-xVsprx^h>4k8;|pG&+d@5E=#ezy1=oyWJ*`RI@58vYMtK)Whx%W2>-5+SgXB#
zuHYh5VSdT!e>>IZEm#<l^Lg>=T~(qQ3QRlNirpp`?*D$TdUHlwg&OyTE?aNbO0_M&
zD?D<yTy*>JFwk9g>0_>i0;1keCaHSocs;yPy)>&X&?<iCy{rRISf9@=JSO=p$TedA
z5#@yETVAN;or)|8vzg4Aadh6&CY#B(YWbFzzqs&lvcFyC^>wn^2G))fy_U^b<iPO0
zQ9Qmz@S%D3wKdNCwj%7ibY{&x8O<B`yJn`oaDlSf`lpqjpQX<8p2F$Cu)S3)bXCE*
zg?s$wJ(|hVQ&$#yVph7->chvoP8QzI-99xuR>SV7MCY^g?=^F4KAnu*VlSMXI_2E-
zof~R@zuj*C|4;FrX&+ymv6-$R+`ynM_f9ElW75&r>-Wd4d)JYEiZy&v$L41<Lj>e1
zpG>@+x4U=u@eNiFqNjT;n33eda5n$blam&E3ZBc%KlQ>XPi6kzy$4Jyre1WJDp|~u
zbH~tp>gu_VW)>ZsYVhbyg&G6X>$RKDS#^8ZtljdAuOax2edWEzXrY&^&S%~RTE6$o
zQ<;CZYTL;<pVd|!n4z4o?#Yg;sQM3q7QWe@km-G{^wnWfof<>qy4c<G%5G)enbhHE
zZT<cuhd`0?PlM8+_S~<oA<LGyC~Y{%CVfFaZ;5XB`y<9ro3s;;-2B+X&m_M0Ucq7B
z)$4X?-LKno$kXY;tn77(hx@C~<?Z|V?9MlBuTLA3&-f|dPYvtb&Sdgwm6q@d7oiUi
zxc_RsU*uym*(2@5hTxkYd*Xk5y&m5mEPCDzR1bhnYWOabx9g=^l}W(^ho0M(Z=$9=
zdCZkN@5wXk_OB~7KsB2KtKb7OKbhzPt!=lBwIXJ1Ixfi=x9Mq^IN!G0dnI>fWlec<
zI#gB*T&+%UX0h0Q+|W+$N8YtHk$l~ufgfEGPE1fV$+@v1@lM`+Pi{+&6Vbdi?gHMQ
zTpJdHYR*R;2mXG)@4wm4=Hn3yGr!zZrc5zMPSvKxygbyUK6#U6`qjPF<@=O7RDUWR
z2%ID!z_ii%K{LPI49jA*<=^@2emuBg>c_iEhUHA{_i532Cqq9@s@drj^c2)M;%9mD
zQ}M_=J+CwDcRsFoyx}n4>fq&k`L$18<;+)!w_yugE5PKWV9YYF{$C}>$Ep)OfA)vG
z;eWX7-&ynfEol$ybb{W_@YQf+R4!aw|Ltb_`(3Z~;tU?h?q=GyWJ759+Ni4`e>^9v
zJzb(Ysm8J~uoYybI?IlWGiH6emA(GVYtYKCg5c+>?H6=7rgm9x{QTn2iHXY2r_bws
zT0Xz-(X~^&=65E{-r;ZYki~45#lQ21*XES(WSaOwJho)w@0o8H_<Tz)$W^SY@L#L~
z%AhMMf<wTKCIu<RXSMJ4KK4w><Lq1ZC3xrUob7kZ9^c(}!TQ^U6&4DOGd^yfV!r41
zyWRHR?-V~hE9&T94VuNUF<s^}^V6r(`o;aKlYXDGe*fmq&dsN<Zcaa+y?*btzdmIQ
zH~ran%1g-F?pi)`&s71{pH*L9ZLRp2v~Th1z*V4jk%B$Pp8x-ThqT;q4x75#ZHua&
zf5nG`?94j@W>@HTM{X^Ae5`Rv(cdSr2Z}Db3o9>_uX?fYFu(nr-?MIS&+ors_u&BZ
z$DAim{ssHnem$kVKH}S*FsZvsnwEnWva`2G{z{nDm&bFvaZc6)>!zHq9VU?0Q&KBK
z-`p+fpU;}>=da#X^0Mfh<?}n`_iLZet6n#M`gJ*hy5;k#R=E~=Ke=0eKX*~9_>+m#
z8`TwVlsYecU?-T<FQK^oW}5Ui{+;id#dkhGKmYu13wOE7C3B5pw&h4_NreThi*QzU
z$ymGbm{b_IQIB<h+1=9cyhRUPne<ZmPQ<=bcHQiGHM^2w>a|;|OBcKKR=r-kUEq00
z7kmA3zquN7zFl@-_j^%Iz@oojUtizuRr&GHN%i?}?(Q}}{&)u$gTuGG<?)j;6r=Rn
z9%fx#r5L}s|6pWb>#t{0*#WiB=az5q3*W#SXcf=e9aaX;ZFzzlPK&&~b-rk7Poqfo
z!#6iKGoNep`K2MegD*&G^M5|;Hw`5RuWhaQS+wm!#kccYe1aw#Mo8T(J-l;Dbk;>J
z1v&dVn?r4L58bc-KeyVAV}fAPw#5b;L3>utT(LQEbna;zqduF>X7`KF+n!ka_uK9K
zFE1|2Trexl^Ze#}@9XvW`KPr4gTmLvyqsil^V^HX{j<vX-K{^Ia&!pNbP|_(^+?Zr
ziG`_<+o6wYY>RGf+W+g->N||b6i=*Px9b&X<H*jg54(g9&6Yk?Fn^)L5%o#8^TbM~
z9#c=~S86CeIy*$-WckBZ@uau{`74#z)HXC3`W~$oJuDOv;ku<uRYm;#`%S0yY<!Lu
zwQZI7FZ4(MXXFp|2lkiyHCGx)|1<FU|L^bScc%X@)&BZ&(yH3U?b`2{wWrsl9NzwX
zUUgCLCi}#R(=W#CD#>(F^g6}DXd-u_g40s?5kv3WCkuskR;=6Wmb0LD;X8rOkObk6
zOQ**v2_0<6YmBjcHY2%bUj50!$HxrI6|F%{^A_I2XDy9c)U7*AGW%`6DKJb|67RSB
z^`h$QtG(avRZFM~T25iJ-8^Ttlp(Y2C$`0R?0&y_{7Cmeck_$x4?J5RKRLu3XK-VV
zWpRyBidoW(+mlv)Y2wyX;FJ|n>`HR)z2e=gvCJde#AdpnY2m>p))No*)&5R0Q{leM
zxAQ`6q*#FFw)mEfi~B<le0e|T#||;KPmOkaSnoKTZx0o9%MJMl8hU?pq;umTEA`%k
zZ3Q<|r!Up+QssGl&|7aO%W97Q%o1)hl&ZhHV2s{8XG_eH1uu6$jo!6LNanl_!}$Z2
zk>3S{!?wJ%-#saEt3gVPLg%049|F}jH(C;T9beq}(2;p@5$or)vQUlb&JEtDIg%Ai
z3}>?6&~={v=%4s=rN%_z)eH8%I;V5wzxe<1wNYC)8AUd43UQL#^X*plkqLr{&;Cuk
zXuUCXUhTJ=J=F_%&!q9#t`hwnH0=n_g|*S<HK7;OWDTVh&5e?SpUU5N-Ng{*9<u%O
zIqQD&f*GQT8|IeX;<VN(I(P90qgUOtndyD@|0+T}yYp{sP|Q6b@%sQXzlB#-i|CZT
z1XkDFr5_Ks^DmclYfxwje4A)9UDb{;JKb;Hy1rY_4{tcY&hKwG_2SQ+#m}vzt$vwi
z^Ts77wI0gqh}>tCr4ur9hI>V}R_t2|)ri(ahQzm-CV$KiEhtd=&0eOiKk0mA?4_xT
ze>5pHR2@&6lYd{l=V#%82F4zp&C+WR&1k%k`cB(}dveH1&b0x76Fto@?z&J~FxBOL
zlibPnIfs{cR$N;XS(7|F@%9<ft)LAW`FlRHbqOk;2;{0xJ;V`k^oK=~_y*Q#p%0ZJ
zg%$=*OsNj>S=B3LTC{Q@qekG@A2af-5)ZNX%#Aovpu{WU_pM~=E|t}X=0~kPofG}5
z{mF+}3$MSpcB9=nI`n{tbosL5UE38?);{Dq`#hTWgK$_yTKyK8Wjxo}xD(0}_P*Dj
zUc)42s`Fg(XhZGE<(}ed+}u_cw-<J5oehjUBeF6wB0oq_`sm8lHp*)ALjL42x1GAO
z=#Ru2Z3&Y(H<UXW<*Z6Hz6n>{UGBqsc(H=;t*8#h$%p;-6(0EIo#J=RcjeoY^*qeS
zKQu19d60?sfIDB!rM<ElmC77UOI!b?NIjkL7Bu?0Jg(;B(KRbi%h>VAT{(B@-uc&u
zc*OpG@(ukUs>Si=<fJPTH78gyt#IL)7W=zXnnT;Ke8xeM5@|uUqjnPZH$$ASHiRZ@
z;90w7p<3kWY%6WvzOd+<2bx#M?w>GmOU;`b8|UOUu&>+qD{J}_rm&&~t!jH?mNr(+
zWN|pWX7~U7$1mTAUvm1~ttY4aqZ&*@cJ;5V{o&{S;F0n2EjqGRB@FKWs$)6z8m#tT
zi^^7X(qX$g&o_$QN?U&!r~8?nHrY7`jhW6c-MFtS8n2lX&^L)e@6d<FqYB{@^xilX
zEB8G;dZ8=sXxi+^Kb=Qz9r$$o#Y3~Q88Jdn3wn-;o|`FM)@fPu_(or3|1@U-gR{Yl
zPd{avb76^LhmUwN<G<H4zH+didB?_QtZ<FLohMD^-(+3o=%gc?8w|uPzVYy6`Q5Vm
zDtlqWiJg3GtMsm)|J1inH11xE+Wwu#4{mBUt3AS%qMdQ*PHO9;r&0fIrq54IGOBr@
zb1Y@s@tY2%EH~tG56ei!JPABClfxr*jbVt9(isl7GbSwAxw~F2TeC5yOx(bi&v{z+
zMuuq38yh(nM?@IEnVqOQ$yVaW<)h}Ams)xy!<qgwtxzj_s>j!;^V2ga{`fp2b#ZN1
z8Ob~QJ5NXGG=A#a_d5KVWGtt}<_*TjS1}z{`@g~L#ga#_88`4uTg=1q<9;IZq5hOu
z&@K&y=N(6WbXIpPo%O9>;A`jGIlIp?{C>aRzAeDsRKV`rjpT=(6&3>L{msM;Cq&u!
za92l2@GbfKW7UtyGkY7p33T#uGO?^uUT`o+SSfC&el6@QfG06NXP71X>}ropc>UdE
zYkZ)Uyus7|&ARjcr%$n2l~&}gbs^&hYb*b2OFcOWnHkm|s`Dl$|8weD_-n%#x1K~D
z-{lOfznsc`uvNAdB=H{gaXG0m_e#^RjvI$L9{BxNP+7scT;FW_^Ru&$|Kk3=TYtiX
zqBD~su3l8wILX*I*g`EPg<okxrbwLng-xla9gKf?1_~tbfaV-Gv|G6A|1*!McWlUJ
zE4FI<eBOS((}U7g$wJTNo8L9_Fg5&7Nve6v=$5|EZQ5qh#?YU~r1PKL(LQ@X>rjyR
z&J+8U3|GF@TqNOO_Sr(Oti#Y|+u7p&gQ*Lf13TiH`87VynO)x{`I%30`<)_hF&P1a
z_zj2o|9z2N$Rxc%#a()XC*yJUYY`e;d3PT;AKB7$!Bu7Ryu+M_Hp)k9p4kx4WO>-@
z!I#Va=|3jKN!WjEZkW`yLN?iAQJ+x6?j4gfyQam|{d`&=D{Xe=98>pHc`;UtmR9lq
zw?8ag9IoKZA`#{$Y}xo!;Cx;B<;_p|);yYi_`mGENo6M(cWca3-nWq@UGiUU?k1-I
z%iR;#X3lW<vnV0(2+!_}x|Cf8(#Jk*@cm-PJBi0EZEo(w5U!c64|&ttu6TKtT2HSF
zY{^uAZ6o#Uk!Ek^lLltKg4#k)SCJD>*0%jW)n787hgIjM@$xOZluGQ1?`@tMc)U+m
z(t8=(b*E_$c^LK?9Eq5*b&q%IV}ZYI>^z;C!Os`VEEAo6z3*H=)6oD;mJ>X6N@+8i
zXQap)GJfHF=D0=skdLQ8a(4ks+V3Q>Z4#S|J$GKY!ce^B|1!2?yj}@S#Z$_{6@^zE
zy>a$wPKOm!8RO6WNxRgKmDr_4WXl=qs6|XJs^ds}vq|CH#2t0IdMjf<`2mze7h26P
zY!W>&sWmX+=0X4654hy>t_!;Gt@L}Iygm7GulX^qCme5B)7TCrSvd08-pTMTEqf!A
zn#J&ubK8a6?7i~KIn94vxZQQ+Cg+=Dn(@9z*kvP6-#X3ZF7WBHNnO&6wuqO>(-LZv
zS!!mlW?k(p9_#|L_;nO0BcA!XM)U0nS*5c_g*QxPnU<BzvxH;ufk#t67cY5a-?St#
zDCyxL|A^M9yxEc3jf{u198_KhwsA`Hm<6?Pt(=m!Z@Srhf#7B5j!!yWCSMqS(7rV=
zE%Zp#v}v2qBxUAJy=B9|c)BntNAdKO6ZUpC&CgkiWW%<z#;iJboU7reVYKeCE{*kn
zoDUw}a_v<A?ee4U_rGeCS;_k_f9c@bm#A!3^iciKx5SA}9Z9`s{DXd`UHw*{xq8hZ
ziNh(4lUM1Pnl@P8b~WN(l(6${ys`E{LxBe$TMuMhUdF3*p*BJ>$ti{LomaxlXNM%#
zb6l1X7nXHkb}&z0=hE+TcS*{*iVx>szPI_UdO_uxV_2o0Pz1yA2^&2YBr|O_c$*@X
znshYp*{%j>&Hr+2i?x^UVDd1!o?!g;mRPst!C8FiEIp=7ClB3F{3ysOzs#VWEq${`
zDZ?YK#>k#fkySQkdRubC1G5-5wtsM*>~_?}vAsRe(9rW{U67XT+2msfLl3Gh++ZY~
zv^}9WX`%-EvX7sIn^~4sT;XO}lD*;PuGr?-KrR>M19uJuiMsM$6bfhy_-onQFq5UR
zz2Tw##LULCQP=KI;oSVui!Zr7w=?A5eb3ar*;fzh^6*ZSSh{iD=lR^uNmDkt@^s6!
zc)j=+laYNhVxiKhl$x6t64w~EZ46nO!ef&+o7wP8l{nKG&MD3MJl{BP^)nq{|L2~#
zR5fq5T7dZK#1fvc4;qzUI1D+Nnfo|rFccoDbNg8)wq$kM$s<86p^cF`SO0RJ?%%0$
zb7O#*W90VB=5Y)E9?)K=QMYS@>B(u~dY724v2p)ida%!B+V!LcgU8JEn{|%zwcTQu
zH-2i#_$(_uw(~`)y)J{%infmE>^-*+sa)N4z&`HP|9Sb$JR%bR=btm5vFuh>r>Sh>
z>nXR7ZSLEkwY_0sqvq|%&ARjQ>m4qByJ6<mqxiq3uHZ-`gR!i<TW{g<jJCx_)}CiH
z&*(FHWhHK4nz+I0@wuPukAGfyc=+E*^$Dl*3lu!<FDo`YHrW;O>Pq7Q=X$4^X|jhE
z4jHT|?_Iw}<CsX{p(MejU4ki=PbHd7wf(ZzEHzl@eCC_+Y2OmBYtDcEZ_@g@+Sz65
zPMtQ!4F?XdHaXVuQ16L+)$-Tk)6{!xb$Tw`?r=G@@qe#tV^!oDWre#z^FpHf5<8t@
z%9uLb#O~ZUKO_BxX7-LN-N`k-j<h^h{rTg3`tcbrd7RA>1I*T*ZlCSCQ8%$i+1LFG
zXUq*X$M42dFBPs`%*i`L;;|j;k)Q^K*=`EU_SoM{b@6n)wedhn<x0Vs$2U!}6nCDv
z{<b7<_l1y@j)(i3LKS`ZXa1M>Rxy-0RGMBJ6T8prtx~t@gY!&vvvb!bOt;xt(cC)U
zY=KOyk?~e#g$Au>bxE>H+!qr6IkD`jus^VmyUka`<^K_;n%2ce)#uMRzWM!Y*W9PP
zvUZoh-T8D!-Y(m~t9kv=sK9?UugrZOa<D{nD{Q$WXz<nj%)bbWn~HpAy9JN-w_6H^
z)co<_PUiRM<>_@?d!$QXR?5VV2s=AQHucSxKPR?mcKItznqLskBDM0%$&SuRH9D0J
zN7hdHd?E4M#)!j<c7D!3-@ex>_<hV(pLZviqfg8@ub!rz{>kae!J0LjZca@+#I<IX
z!Izyyr$h}Jk4s&6&aYx(oiekyV3(BOXN{~gSN1(IP7*$|#`&k@j7Jl9l*mNpl}T{C
zu+H{5B4Kzg>6w=4nhOj6>TNJ@s9d{3b^hxFg@kIAH+>Jxk~|ZRzPj;;JL21+Sc|w{
z83!ufoSNa!6?sGa$sD6g6FmhO+cq7ovJc?45Bw6Te|XxO@65>}wly(Z8m0GIJNg9j
z$S?i5YToK|ZT!+Z?k8+pRLEYc7URCAK|rIB<;H@i^^(EGua>-;@N>TJdDoo`+DZTZ
z2VXzhr_jzQbNpid4aT(Qx0k&2w@xa2do(QJ-)k}BiVq3Txaau(=oFk2DOc`wnXmI;
zMJrconTn5F#>ZzXoP?eyY})s)+0<HCNi;EeOWK3GeeDu!&jw^Knfq^{mG+T+6W?`Y
zt$DlV#mrkVLcGT!nNMbAE!E8JV@Szp&fn#tnmYU7gu>Px43ij~**&bD`=2x3aOgmb
zbcFm49aq_gmx5Y36&7bDE-#g4s=0Ynfibaua?f9zJsP={Eay(xv~a2EJlZI=aQfk{
zl`{@Vyx~dgZ<2Ffs+?fTY`=NchPf)zcGnr&IBP}NW!(F#`HF7U+>Bkqov<xcU1EE6
zv60HWQi(UmKRkWH=Nvwf$w`5c<H5eZ=bL5U`d^=`F;_o!Ud++o(#c_Jzf}Ku<b1m6
zzoz&{aH4P0WJz|GG+`CPGL_p!htC~8(sIDz{9`p%HMLKVSQE+xKc3xlY)`YMjJTT5
z=O5)8)|^}PF(X!E@eGRz&SvI;g8SGSX5VVP@X65b0&C8T4E{4a8Kv%Ue9Swrv^6^=
zFn#89!zqdP)1ReYKWM`FETBgy<#=Y3Zr&Guvvq}!n1pRiXP6isH?&Ne&Moq;?h>nw
zDMO3ig`8)#ThhfOpFMeYVv=CO?dGKn>#8_(|HMWzvtCi&bJbTlCE}sg53ZN3_tLDC
zs!KZ@1h#!lJ=(x6|IO!$wXw}FjX;q-$1M$N?nv5g+K_r^68B`OO{ZQ6#;*9fCM4kN
zjf1W2lKMNuqcz01r{r63&E6LK<B|BgLqEb6d@ww+!#H75>-J3hGw0bREe>?x;ArF#
z)aZHgc>mpPDY4o=(~l|il|B7t>5;VQK^mjXx*7TwHO&*H)}AYWmNK!=t0k``CUKFY
zvdMS#PewZ@$SRB4O_u1MATWn@iOlMWl3lL5^tZ3LWgcsk==fld$AmREVhd}YJMd?*
ze44syF01^_<{Bfx)TfuVHtn`EQeR~BsH}0~S&l#XDl)B~3pkZ{XSUz$&Gl1@XW5gj
zxSDZU;Iagfn40<xoeM82pJ{02Uuhv6w0LTMFaM9V8>jM$Tx0!XbF=-p+8nL6zimgg
zdhYqK-@VDNGwn}Fx5_LoW`V~nmP~RJkHyLUGU+nyX;EobT@x(9HbF+CX->VuflDo=
zJ#Vij9$BW>GGQ+xukF-LdIycpyjbof^~zOg(U!ynN0v=0i>#uWymI9ZO*!zW^W&jH
zKIbhf7oM=cmNKpTqD#%DSqtN`<&19pd>_g4_~-}6wsPyU3w~=X{`bFW;>&0bj>aB=
zg!&ba=F7-!UQv-8;xte2MBbOpr>8z&w{n-4lfvtZ=dF@eOI?mW6K>>jw-?^DqIO@8
z%JPmD=!#VlQP3>*Tdo|J8u8HnD-ocPJdji%hl{73o^pcw-1jS(ra-qvJ1tltpwZLr
z9q72^7#C<YYm380g#(6mdv3q6J5<s;?TL4L&m+iYnFT)t4oJ@SFWIg7bJ0ie7%Zn_
z!)y(Uph@SwyLTL$+`W9KeMhtZLh~+FmYYg%+^*Qg3zoV2ih*X!Ta*ucJbplQ`oog?
z<3$D2*u^a3r5%Om9;x+=sXe|kh_C$C=8YG>c%4;eYModkV{^Q`?RTWn8!d76@D|pU
zt<8sJ_@6H+cs2E`<Tl5YR@a8xA0J9q?=w6+ZHIf3@e=+Cg*tPkc1BG7{3&L$kjK^Y
zDar>}rD}hF^WDp2{yDDLTsT~}=q`)+uNS-z&lH~KPfb*j-#)V-jw#qdZSIqKsqg-N
z%71@GWxD@i--J8M&bv;(^X<txgKwN7efCUi4nCZDMnC>(n2(<4N{{_JoZG8U?%dZd
zea`5*s8-P8Psi99zi)EC=KJW9%exS#XNmDoy$-E89eC`NgF@DP{_hg&xSs4b(mJ=t
zM@4>mb>ZgJXOT=(_QW6EW0`P(VfP2)39IIJx-?{;+;MHEQtaWVqp_)Xw8AGkO);Ox
zQ4nZ-%xCv$`KLWc4a?8Xt$MZcn*lff<D0(icmAF7ZU6R8TlV2Hhd0ySh;Pbl=e>Nl
z{KUfay#0?;4cA?LdZ{MQ_DNaMZo{9SPU}B!eHgd5YT{h$r-pVP(>PY1S$#)V!oaa`
zwqE&z!dm;4Q;zjE^V#x9toirznV*Z^gQsQ3c-EgTSDoUwAmd(E!jhi`25TloTt7d@
z^0MxZ4Q2)R)jU6N)g0z#&ik_?_(sRXEWJq#K4<n$2{8KZ=zQ{sz<C=F&Ua_ciV7P|
zRSQBl?!I_mpMC0;Q{|B{TGPzeWvPhTUUJo%;Lj4lZ#b(*^YIA|Be8di+_MhuFkYtO
z__$w?nLT_7pNz?|G!N??O|t_}dT`o=`7o3f#Xb)6-}JeVzkG)Z(+2H3-dQh%xnlNs
z3cWV4?>@xp$nfu=Rzt)iy`qSw$R_K+oxkrL5e=X7`I8cFv*Ztf1h+?pE_wFPr-iFX
zDoyAPovX-o(miv=lnuv<_v>AsZhT=1XT8vad7)kos}#cw7a7gp@b*yZmo?9JhF{y|
z|I@~<*|qD{j;B}i4BC$caVj)uPE*NQekZC=^U!>rUZ4AI49zE;j<gx+s#)$V)ty{@
z=}*^=ig|2>{hM7kwIseh9DL&4vABB@kEE)DcBbCQx0<UcH0={_<eeX9&VFB4qqpsu
zZqDxYGgBx2zoy~(@f`OQo4wEUn$?7&S-P#mnT|zBoWARDw)Ies&#n-?s<QRli_J~?
zj~-~!+seZHRJOKx#h#sSI^~zKoz9!g$DQ(hhvk{W$sE&O?Y~-gkX2mZq|A}UuJ4Xz
zv8^kAe=qOBj=wX%w^eA~o^f_R$7~6)H&YrslD`=;{Z7+znG)4Ig`atMmFE1IjyUhs
zysz@st(G$*bXS_}KW+VsZ|NH2ASUs%#sy(hbYm)PySUHlvbi>fyyM+I-$(Gz9P9FR
ze@*T^c&hokbGI7*u|q`~iAM?)?T+-=UDKQ-(Be|`*5awO?VE#h+M-VWI+GOf^l(~?
zOP>8sp}9gEyl?vNm>d0mC(qoiB8&1sv-lr%PUuYSJ~wl&!0m6FOZFMd{Pj7(wfX#U
z33j`fmj1Jizmp<zH~w(?b|cyOj9`SE!ZT0fd6N#FU`XB=mZa4<uhh!hKyH4rt{dk%
zpB%mj<2Ss@YlOf3;O;i^I&i+#Rr37WLSg$n?gG(1vkeL7=h^N|oi7woW4Kr7<SU-5
zjbEiE_0N?P`@KZzTZMI}`ppyFW(-H}d_OPqYE_uRe2xvBhxqiqsm8rM=uq<S=kwdl
zEqkgCPMYJ&uiVGkt@g2x-QVJ%t66~obI!Hp{_@wOrZBKgU)jEVLB-$Z9@DHVpe`h1
zMKdRNTVtCg-@JulT}g>~*%n+6b@}GiBpp0*+h5>Ajq>@V1C1(y$}*>QJOc}Kvg<8+
zpL-S57i`rI5MYTY{%o}@wD;{qb6LsV7p5H$%E&n*J*~?ldFIiWC-H@?T5Q)RwoKp_
zIV8n>^JLN4wY?LvR?K+k>mIi-QS8xO!_yw#6*rq#wk;J6O{jBtacIJ{u9cJ9lPa$0
z1a#M>^*LOzi+{E>WX>c;H-*H?1sWDTKUo`(OR@jku)HZjt$Ko#v*j%Lq<&5QBoiC4
zY@KB@HqY6tu~hLxg~(Z{W4rBHD)t}qI^f;(`0tbhUR@R6=AXBIZ*#>ZP2=95WWH3j
zR^QxUfvHUIb<*_CJ^k?F%!P%{-*2YNe~LP}L2a|dW%*(~-Hjf#&v_ZYIZa3mlXX69
zG>26{Td~i7;pNS1wh6i)+wjZr#*ULrAKQ569Ob<w*X+l6V}ZuO_#+FpZ(N?Fny|S^
zIO4^OdknU2Og&;RSXv@4E><#_Xy|$K9rsEbbr$KvQ7>lcEElLSQ#_<Du|7kQ-+SlG
zE4voFw^4qe{V1e2!pUXnw#H75`$cX1b)SPJv+plqd;D`{-HcNHM_rYjFI#Kvybgci
zZNDMHqB>dV(3$$9J(X?zo<8n1$>Ofdb{TD~-l&)E`0Vz&m{m-vN&@01+yBm*ZW&vA
zb}HXq-K*+c*BX1aifT<?oPTi<tAGC{mgd79M*<!knYXx*aScn&8};9YYwq9N@>u=m
z&$PbBvn{9O{|?yk_DcZk>bfrs`?H=|_e4E8wBzId;urbz1Rh*zTz&FmPj?~XGKmV#
zg`yG`({wo0{bD3lr5a8PaVY%XpckRSrlF?a!x%BACw<1Yh&`XpmK0kloNQ5wd=L>9
z>){@5y-Dm?ny%_e@dK9U?Q?f&Y!3SOdST+vM{XB39SHpQvqWuU$D$jRN$D-F604;3
z+l+cQ?0ysC^XPJ_`@2mX#@$o2=YJKJPqr6d>F^BHyjXLUhey~;?vC}vcX#(mtxaF@
zGi{T{+s?v<_g|l6L_FU7{J8&{$$Cbn4Uu!6n%zF?6S1v2WoOSNks|>IGeb9jcrL7A
zcICdJ!{Ygt?hM-sQ*TwxJXmU|m%!W}+R)#<rNO@7VovjG#(NDqlh&A29J`v@I^&&$
zhJ5IxtD^G4XX;iJ&XQkTU?b?TzG?T3N=fs)m@?fI;r6%EVJG)YeXDmZ>FzTX`D422
zKeuQl@QeA+IC!HmP3=g-d_R-4>q~^ri>m%KxN8t>aC&oN;7x;{N&36=Lh5{23WBs|
z&fZzG*ilPznrF|<>bG0P_a5HVwXRcjRrU9GW)o*#F#W&UA#Sr-!il14=01jUeep1b
ze2$b8W@ZIBR};4KZ8<A%v3w(I#*fan<4p2wkMEYf;e8{vIj8)Z`s0}a{E3q?RCt<G
zO#^b;xZD+{*F9rrOxB$(T+ri`v2kY8TXw-z-UH`ut_)WHWBK>)qmtai9Q~&_HVRj3
zR7%)%Zqb=D|Df4}>HWHM7k~MoyI8hQ)>>@gX_?d`TevRUR^GJiOSo0i8)(CE<mh45
z&j$}oIw{h;?%2OMzY88oYz|fXtT4^9Cg?%(j5c4{8kd`=4<`9B_ne;C+A?=#mqFx=
zXFfikgu}(d^%;G{EEcd`%bxI3Se_-vhne}f&fx<Kds8p@JrSFx`b_N`Q(^g`s!rFI
zp0sHVwj9504*T!ub`|wGnkJN||3=7i+rdQ#T-;LBLc5nKaLlmgRAQeWCw1VMK`|dY
z|BjpO^JjD$Oj`WcJ>taQwAsh{{neIvKT33+^XbOY!$o<A`u`?`-TranhM>;1M?az$
zrWSRwGw~l*+g4&T^PzbWXq}KxJcoCut$d33vl(jJQmxm#RAO#anozUNq-^=74=bGy
zZIo&XTw}aJZ5qp;0HcS@KX^Qk9cOIZEt@4{zt*AbN5tvJTrZ|2?{M9){_vmHJt-Ue
zUp;uHBOz{{@oYzi?&YgX9qtJ<<jzRVSuoLo!@lfmLDD9+2iXDZ8jWkuZRFjP*e38;
zQcX?lna-uNj5D+*bQ`GX1-_`sVc5LIXnV!#xduEr^SCRST|}pHGoFiFc<<rdmYn3D
zP0!W;G*3Hb$?{D8+=k@dBkgxy7F=vyyy)S?<6hi{HK#GF30JUXcBvSj>^d$y*>oAJ
z%t{UU??q(-XLodY&QJb&M7exs`i7tjLN?PZU4>&aPcAdk&=FtMu6F8{r0@fY%rt?r
zXA*UD9!0P`JlD?FrY`FD<cGyW)~?{NRi>I-{$5=b>Rgs|qE+0mVv}>Ty`i!4a}A%*
z$)#<~hSe8yR1bZrNGzPe@0QZZ?!L+T#D11z<>!hH9?>~#F@v@Cm7ZV5+paUxOlJaq
zeBb1H_CLevbhdT1zrXRwWaqN=S#oF|UCI1qgU_s%VEM}eVoXOQauS|f-@IWwZz0RK
z`~>xDZfoX5G%nOusIs(h6neMVVspWo3EvzVrt<8T<SwhwnY_w%vNlV=&N;49-u%@$
z?|K?T8`Y;suKUUP{D9&bzd3<Y-}BWN&v@V9JaJBMuJ!fPOI$Kte)+cU7MP>Q)?Oep
z>xORHX14m$9ELTsBR6gCO*h#fmc?~SX7!)5=J#dz3M(TEluImiPF(7J6Ea=#{w^UA
z4ut@TlkKG)yX!5w64oYuSA8IGhJ~@waGI~liqr^^+>2+6q_-t>rmRj&6Yz_%{Lj=A
z$oOAmlE&GeFBcp>=y}G@B4yL3M<HU-77_w0PTX?NoD<pB^XZ}Sn#FSp3|c3(C0==S
zLvyw96xRp=Q2|A%hEV(BJ_ccd<`>rreR%(T>7TRNNuAAlN$A<G8KHbh$IQ|q?iduz
znYgL(aNm(1v#wiwIhNnE^?uGWOXJBxC*5R=xYbhbvu;ZfO1XIUSLghS-bYmos>Q;K
z`aUgmzq9EtbGhI$$r+xB;qRgoJ}Ftty^860x-4XWP^&gS<NWA=c|oo1z0<Q9iY}hp
zvO~hAD3WFOe7yq#%cr#|@SJd(arvWq<xM-DCyQ#Pc!#f%Px#l^{`1E~uJnF}u9{u_
zl?I>Y_8mBy?)P$&#smqLP11kg6sfXp5qj@&ZSnllV?yjUl6TKPd?9|&F8*6ktR<xD
z=)kc=K|uAV0so>m*2;-qajuXPJQ@T##F_5AI(lqR?2%dPQUfRT@8#MS{nKxHB>YUC
zq*jLAzjIc9mc6)2<D~dRU-+)gE)ho0eM+(xrqPgP)|*@$Ch2=WnBoiJVoED`eJW%Y
zPT%{s?vPG*xEZWhZ0HP<aAh_B@<Dd9*=kG35^M#QP3{W}wogyJANLft+<Sr?<2w7N
z5}@sb6XvB%z2jk5z4E}_6#2mUlP0w{^j25KuNFA3(l2cEexhy5q?3=eW=_}iQcU0|
z*u8%D19Mrmz-#A9DuW)szP8p|<-FPwHBGgT95;4I^QYX4{OI#6Lbd<WJXfBW7jyRH
zt#aS!s$f3p{5`h^3+LbX7R<Zah9&0?^ZVo}k#j4S`E&^RY^eEJR26>jYETnn;Pp>E
z4xrV8?>^qEe*bjm*HwCa95;5b$5m)<PJA@eNYTrHasADU*8guVU?~ra?N(&kIk9HX
zy5pa64>qyB-}gI@$9kn&s2o#|#p81?*yAE}HoJZFnJJ(;^}*})`=9AL-_yL4xqR-V
zTkc=AK~?;|GxxI+md|>A@nzPRuQv>jJDokv#3)_pwtvq(u17O<F8H~L$+aFk-1>Y$
z<pono+aUd<R)@IzSr(5vlwbVuHNX4iwEq4hT&$+cJ_#hOkt{s5{i1%p>!X>Ul&10g
zvVOOtxlg9zLF366eN}?yET4XbIsg9lw)|%5bkKsAHtD<$X@M+%RmB4l#*cce7tePU
zeKb?YLHnByXhr&De>>30Rxf{UPLe;hD6f9X*3B`ptAm!Fx*8tuJ8hwJyI$0m6)}~r
zF+Cgxr+Q3v)9*<B)!C&m?<CWfMdv<UkFSp{ILKPEF#Y_z*XwrY1%7w!)z4xnQv2!9
z$bJW8{ri2t`(owybxVnx+DS2*pX#xAEaxjTQEOf&gL<WBcwFVv&FAg9y`Nh?pEG&S
zy_N2+Ee?9B@^|;W-FAD2F<brq!~FJte!t(ZucUD0${E3g6v=qEnH$rN7@wE$(wxWH
zs#-ZwSlw^WmrLFsb9Q~dS8aYHq4`N^fUTAi%OaI{v3%2z^>MQMXZ`y6+Wz~U;<Zs*
zqjnS|3MMXGme$0OeCmYJz8<6GvmNP^w42;OM_wgOdbj8Exx;+cJ>BmF>VCi5ouBk;
zX`hHDQ{W{1-P@(4OtZecTD^YOv+Ijox!*{C`lb`~jKgGhVTy@l@a1n3smH7wCg1h{
z=u@^s)OFo?yWek??6>dO>)6c3D`BueW7b+XHR(o;lhaGjOPl5Vxa6&Wb6))ae}A{%
zDLSp_-1eev`kIeA6>i7+1XE-rean7GL>`+ScrNkq5o1dhV>Q>aGmW1g;?}>DwR&y9
z56}s(7Z<xP{%~|jZkAh<;Dfbo|NlI<uW|okXr6DnB&>n+btAjni;Iih_t*W6tA4w+
z``<r<(!7mF#Uf6gs=geW^|R|FGn0J%pO4Gu*PRm8eZT+zzR=^0Y&MqKk1h#a%=zfU
zV&OW4<#xeT?KZ~Ui+ZLdKL>5_1s$EB^s04M)E<tMPhrlfo_5Z?nnlmf%nV-UBWa#@
zr-f5^hQnQ%s688|C7gbJWo7gJcN5*^Di1KShm?IkYCN^K;NGeH3kw*H-rq>>&%L{=
z^qy9ztCNB-i&prjNbVVWKMSU`pZ;}0@y7<OGfN5=Y3eYoc&729=El~u?~cS>dn<lE
zg*Wz~ang>q^os7qXUs2g-3u{YkRo`Z{fTkGOsn!gzk@eLtbY8|)xjn>?ud&%ON)aV
zM@Uz@x0<MWkK1!|A+rUQFV3Bss{J{jdlhKqs3<5pen#`Id^#<Jr!D5rwx4<{<5q7L
zb%dNLrXAGui3@z>Rowk7j$Fr1mq$lGKkC;16sz<%)Lgrd;ZD)#v*xc?_#d4Z!s(>Y
z0Lrr*S7L+gkN3%X*T3Dp#A@BSv|ffgpPtXJ=aaXyc`Y<0kb|RfjbO*7vb5L?&(tG1
zM`!ZxI<uW;o~r4W$nt9+d7kHj7UZot9q6X0z_Q8NVUoV}oqyB0H;anT`7}FU(&m4A
zx6@0(0-M9F1>5gcWmh>Z<zQ3{&;%W_CK7i4#LIT=sCA#z#Tg$9TPs;+UD2?tR4ZR`
zb@L1*0o9+IDpo5l+q-5<n&sxHR~4GmmnzPk(<FO-M(&*MeR4G)4n8T-|7QEpHFaIk
z;+c0A*yi3cnd{#5w<FYxd9g}-S-xqAm+|AM6~@t#%&8xGUOOCJ5qr!yd(FnS?91S@
z!5Y0>8?LgkPk8fn2Ix$)+}qof-FjABkqWJ_5$u>$^D(DzP1Mw^dp^CLq0LukG$|*@
zJkptS!)E8z{_lL;_cjD&FzjA+=fJ()<@r{pA3bgn_PbGfJ=S~SW7Vt2-`(BqKi6vN
z`;Iv8Pp%EYZj+3of0mrdPCNQZSyYUv%|YwD#CDIjr<vO~+}iPG)9G_ouh(q79yj~d
zk7qxZtc%_KY++XDM1K~eoz1S3^nVL-&ahg$fZzVlgdcCXL(jP=C)gzK)2(w?_n)^W
z_^qA7RKb1k_x-L=`~Kr`|Mo*0D?bIj6ZW@Bw6ClK9dR+!JpZ2N@;ObSTZ*5b<MDDa
zuBe^xMmucHj+e`3^VxiGco%Uzva|He_4xX;#?z);SQ)%reSXa){fDRZ_diLzSN8VS
z#Hxd=;yTgW&MdY2`{gp|jF5i&e-;%V5;!WPo<3}sS1Ua^Ni}^=A=?+5O2h1HI_~}t
z=9b@kd3Ck8=kNFX|394`@3&WP>by<iM?V*^%a&X)JTCM3f;0ccka?5JSf<TAajDj(
zeWBNPMsbEm*0PSB$G^Y4eEhkJxZ1uW69d0rR-Dk6o-nWe@9X%NMVj{%115T|^qp;X
zXXg8^U(aUeH|$M6KTmA)K{n|RPS>Kc*Ir%Z$}RUKDu3@+57!opUk(qx-Og`k&<x$4
zcXx50Ro3gZ+n=eIh0Ig9o4I^$M`Zck((u^Q(0c{jdN@{9eSLLxMc`uZ>3YXy%VXZ&
z+M0cJm1*_29PhWcx9fk|Q-4ys?Sz(G#iA|R>vlASDc-I~5xuQb85tq{_wV=n@msS(
zxt6Q|tvn8i3v=v=)S6WB`|bAST$=*U{rLE}QgVlLJKwad`|9&+jH2}UeSc3h<PT5z
z!W?`0el#zS!y!AHE!G7O9IU?n`FuV(tj+QJt!(|ePcGhvs#d*w!qr$}<NPzN>2#>8
zmU;ujD-o~ro$B)>I=<IGdR6&;@Ap}MKR5H+Wn5cxv)BAy$EK&3Sq`0^uAlyRL)zI{
z854YGnH<dO{jmGruh)vsPqS`rQr%Si{cid5^zC_|Q$5XHWEV}GpYr6U(<>*XS-cF2
zEdq1xYJa_2y}r-t#*V_rpsioiqVpcs=D&Eq^SRuXtM2l(BDVeQ_X2z}<u3PmW?VUF
z`~A*jt+X>U7!5%iuIAPKda3N*_hc1|i^8VQ6W!$;U*0J=%<K90=kxhDr#q&s+kDQd
ze)9A?(^cZXIoI9FTppQb#-HFQ)-ye>D)ZY>&2&x$g?XA+6!!o9cKgj|)7)Doix^L+
zY=7JS;9=yce!E{9P4iTICU)2|X@q9v+?jf3eZ2kYL+7)uuB!d_^ZB16CS7NCKbhq1
z^G5J+_W9>?iusawBA>>+vwgqkvq1HdMV~J+ELof^Av`}!YWZ@|=}9_s)V?jaU-^9Q
z#MU1xmE0$G9Qoi_ux{I}tPmH8iT!6MpP!jN@1*~h1Mm0$mt&VdwDf1^q?(y;gr})x
zz1k`uz;v{gA*h8dIiV-RY?_<B=)|^ZZan&#vvV&8RhE3e670YA!?M|VS&aKG-kN9<
zp%BaC=8>2FR9#Bq{);L5E@kqUD6gLLB>7Zn<;LAfwnfp0I07fFG1(-*SF!Y#zxvOB
zXBo@2bNB!Kc37s%Eql`Yce~%`U0ETRrsY0i2}AXqJKP;H{RQuPCe@^LpO~q#<cheW
z0?VXchUMR1C@j)gH2ck*pDX=dE%<qJU)$16f-h9&bE-@~omyxpcHo+qLc#?%5kqU`
zMj6Ad#oWRk;ZobYf&_c*HvZkRxa$b}tEA(C$uBP~bl!M&)6aH&emygnU#co<IprH%
zPJh)nIbA~GilCwbi<b=J?F*k|6^z+g`llZ47PtP@u%-B;AY<7rkJQr_DyO*#OrLqA
zOu^(0Yu?tYVSP7DJT~X<^0051;`(XhUUrfCKOfyYvrSJ<`T4Ix8+`oVjhfGA85?&$
z+SI@l*dBVhc2hy(?c{#j8wU>GcyYb&nrq<tJ)gSdXL?FZXF73XOYsp4y-$<N*nAfB
zH84c?<cMuL3@QVK4IFBkCUJRxntG*R^=799E`kSEi)}x4VCJ?xiH$9X!&{xJ=fplt
zVPe;+m_FmR^T(9V3861%9lv=nY;6>;q*2S_7K>E@3fHbrkFS$_KI`lCc>7qrM}fcN
z>i?E*lb(C1JyBq_0=s~6k*bmBwR0|ftF2U7S`OF?B%EK9e7vv1_0blg8|Qzs$1eT*
zKs4!!9m~Ax>0QnNGN(@`ZSK_(R`WR!$yP4<IU@0O-{JaHE(1SF(7I)f4wf~Ga~za7
zR)?>T%lzAyc`$HN|5Ju*xqs%?^e>+A&G{_&{81OBhNP0?vgHcK7RT8XZ^xd=-}e8w
zf1W>Y&Vz>xFV}_p>PkG^c;4>!k9l*<?-VdgXWFT{YtFgM^+=5ATvD*$(IjhE-qIr+
zx_llt$`-i#&9S(+RA_-x)alIfZkPI{%6C2-iWc7SwqVu4>2Y^XiQOu?6IT#%xBPx>
z?0Un-M{JgvmsIwz3*vD1w5xI6nccqdlHt~%1lF&C94gX`IxQzw-Ok&6Rz-f6@T23U
zI};DL>E$pMt8lvPn!h5l=8iwpzB`*L?&n`lvS>`-T~g<EC_<CD&FjAp6U&_GAEL8w
z=Ds!z5V60!b&jmvyZpyb=AUNb2wvv1QQIOWMHAFG*8CD*E5hWophVz7p3k?dJ7Qat
zpDn$dU3F^hk~x|t_r>DBZ1U}Rbuw4q)2TVuCvOYqoO3)S0djhb4qt*fI2w;A9r)Su
z`TswaZS9#>ZNb{#SLANLTlPlhmx{)$h_9P&WPjoAjJB9;9kJx|(cCLn<P;THv;-M#
z9JMxn*&)_j&pUs|wsj0|C!GstxDdJ&v^B+nLqvelb01UF`c;tr?*ax6k58fuC83(2
z-M<1%tS$;!^Ytg!9%3?!4dn(+yBfJP%+`vp`FNDUu>JP7n`zSjUp=2`oL+QK#(f6E
z(z<0z2X-&HBP;c{E-`DJ&5;*t+iV*C+}E<;JskQ@wL(qJ|A*Q}sgFMM+^3%Qa?*eQ
ze2Jy|iS@f)X;n+~OE0z5ja%V;?o*FvM(LdC&kqKkJJi1H*Tg+Xs!u4k7i60~fARN6
zQ~TF2=Xq@nkv;zNw1Q1L9UN?eCnP>G*u6WgRs8o3`^l^IE1l+M2k89UaG39N=z%7a
zO&g{b?Tu`kp7HsVP<Pv8f4iBx9dhpm+?Y0f^*4>|^qpK<lV_ZMv}4=z%lqxWEpVF|
z>7u~W;&4&%fNk^&-#N<n3XjV!zJK_1{q>5O*X)n`GVdyRdFb_X(47g(Wp}6_J$dxc
z_O%k(psg#f+2ZW1p3O+^TK)Iugqk~jhg~<`F-kwLBrfid1-j|wrs9F8aUGJ+T$XS<
zsqS{tvKGJIxpDRPH=EDbeLO0DBIn(Cm*<;*OU=Bz^Pbf2=TpPuPO@s;c~$x0AbauM
z&h__gzTHUPWBcXl^!Q7Wd7s|g+Pb=4>(AZt`?9uGCfapMl@c7557%itUV31nvirU-
zm%R7?|7X3L<z7L?%}q;7r+Lh4SYGBgH`p#W<KCXi`}=AuZya3s@!#+Fljl}D?(Cec
zJ|W=BqWQDSC3O!hT>v`8N7!h4EoeL7-Cd=d85gx~nHFBNlW%wI?y?g0U+3-r&*|P)
z$=$RJbP5fqiYoKdZqL5NXZ1osZtjF9zu6DnVLEqM@{VZA#+b>6Sy$TJXq@$j-|8=@
z1E9%a5D41R{C>~pbD#@;cuupIzWQ`p-~LgD@}rxsJ^?Z6y&Ov_LG>ipYEL^kz2{yT
zEIfvL4xhVne|7l!pP!%iOPQXsD!LY#F8Iu93+OTy#rB>hoB8d2EZDo$FR-JAMMx~<
z%&HFmc^3{hw_Os`jcVB`9eMh*pmfd##$AltlatGzsw#V_O^sfXDeA;wbl}OV?0Z+8
z&)xK0=x_T~B(><Z$^AX&?UIggy!d$k<cG{gyV_q0VJ`z`o8{iCe!myAsrKiu4~O}G
z8{O``5}m*I=)V{1_y4=KGFV-2kJ}k#qsB#%o73EypS-@fm_657V^@r;zyXe#?+&j3
zx2H>31V65k0_`V%Z6AJ+PtHc7uX928Bmn^?PUnWx6EDWz>1N5{Fl-Fl&>yq)=$b?2
zm8Tu+CI~J)d%ojQtAxTl>ovM^^}Hqe-0~F<8dVnNfEp!76p{;Ajw&CkY831aNm-~Z
zFj>>khTZXuzy05qGq<Z=uU&bF!+>*A*Jlp<z*bFm%?!_q`wIk>YQ=RT9C(s=PQBfJ
z|KIBM`;3_GZBS)9c+$UPr_}Du%ggjcJv)Eo+~2oX(7oRN=ab1Itz9oqoSE5rESkqp
z+lJ*~P@u-g2G@1bXZBQnzEC@{sgL2KPoqb?cyJ|Ty+^20)J)HR7vAidvg0tHH3R!<
z={L^`d^Bb%wKfQN*f2@F-tc4Qrlv`ai-ITWrQ2%GeBzwF@jLGiw<rVmkB+w%CPc1g
zn#BM1Y^NTF!57E;BOKl-2SB$2l?zOeP??x@WWgWD&IjLvH%dM-{dLh@e&P|U1FC`>
zgvIytI^R&4FYJ0`#%WGB*F)b#tgVz1zaRAS3@%($VEB@0hU&&^i<*D2%!rQOo|kL5
z@5$o}Oh-SlEb#YXR`Z|tCaLb{Q}K)dnOL8vtVW>e+@k&G+ix6h>@rLUJ#_Vf>c3~U
zI_vrcG*Ve+q*qGtPga;_`|LZPtW|=i)|CG$m(G1JNX$If)6XVcG4p({I7?o7pU33;
zIpu=KcsizC+nnz2?$e-hW)F)&;m6+oes5Qs1(_cwPrvz-&ql0q5hvrSsf)kgSK+$C
z&HL#2&gb)rk1U(KaKg{`th++&6VAE?{hH{Z|LK>Z*fFs#iO<!Jd#bNq%V_9(wsC>y
z*|-%meC}nP*!aX**#64}XTj<{&8;(U@2!u};0ReSAiz}Uz;Ijk6`z^yzL{lb4QBjr
zd#E+(q^Ie1gZnd&x<)Znr>**P%%RJorQxmIzPp>Z)Wzi_tn{=vxan*gOVO0DzTYpI
zu6SDnm}arb$9q`+a!}ZEGs0`)Zbq*<w=aj(m#uVr=`Ln)rY?PE^gPRHHG3*MZcaC>
zcrwvl<r|m9w^dObM|c_kFc>lwzLI>bE?)nkDe6vC#JX=+RtB@*KT%RNCB)14@^tNK
zk%Fq1t{C|&)0@X&zcW?sZucg;r588s?|6Bm|Ga_E;fl%qXHNtKifbIXcWbG5olRYL
zj^U%2WI00<&_#Po<L!L^9NxKSyRgfLY$m2}b=`a&KlrCcX|Qz3GTOKlTszQEcF|Q_
zv^xGnMT#`ffB)-(=gem8S2a|-{&8l>$~PQEAq?>WH3|m1`Xr6nW<CshdvlLZ7;~90
z@9!Etea0sOO1cWZwM^n_)^Af5pF66sJwUAR;uZ-NCFyBm%T}IwQm5Zx|Neb{G0V|@
z&Z;@zVjEorPt1JMJlpbs!+cAD$9~l!tSnielL{-H8+r}TO(;ys{r2W&<?1a)T8GlQ
z4?H*`{hV>e-<`)>dgYGX=2V~KP-^kF*zcTPhhutD`P$;==W^H{dUYj&hC`k;D4bKv
z;VR-_DLi4xRrb$<H83q$F?qeneaSHXJ0{0`V{Nvcp4nde{qA+Y?z1zE**})O-Fm%E
ztaVd|n!EB%%_TQ`m~PMEu+KJ&w7A>&pS%C3?|eJiY~9UP%}0Oo+IW2YeEfFmwKb7H
zcJDfrerAT_jAB{6Wfn|=brv%#PV9K&Z}X8w_8CL7jisy_7ibG_7sF+ZVpbys)0Jmt
z6#079WiDJURl~&1USnPRzm8)<#Fjg<Uz~C_)W1}#N?~h@30I4b2OX4athMO5?S})*
zdQ)dRoy~83Ge_xo?vc4c2KW8=u3IOusHdcClz5`;C*${8$-y-wedV*ChmK7?Eytly
zQoCIAONh(*#g5%anWw$Fu~|i^majkfIipL1V!;s>fu%aBF>mXF&K-{IugmgY<Y&5}
zFG2A@dDgZo5)EuR=M0vpX@U<cd?1hz-LkW3@)bR`&kom$<&7<*gt)>=VkLO&e?Adj
z?5KJu`0jFd$wdO^GJj00aDBA4>s9mT4*C2<5xby^^Misr9p@@d{p)0}+|(OYBbxN|
z)YKyjX8uf(&^>ut{eq^ElJd^0N7em!AJ=-to%3GM$rf~z!$q>xVuBaXVz$}`7A(BU
z_YDi)C<a9F)u&nf(vkh};HmYZR~r1{Q_Af=WXzP`aq3)Ynn!$EqY|j<T**;zh|yST
zUBQ{<m7klPzkfQ<x#aM*{fatQ@3XA!^H+F(KX0lp^Tc=R|8$;RdXyBy@ZV=n>|K9x
z2l0|u3tS)B%)IsEinT}YjF)oUF%RxEY2RVJkoR-P{JH~j`CLZ|X7|V^C&o;(c%Sh*
zX64G(u9HvJw%vL2xv-S?HJ_PfVw2O?u&-Q<iUA@V3f}^LSo{im6J7h!MLMwMF!yqG
z?cJ3cHg~++i*CO7zqS0n;kVGx`v>HO56%z};Xc^ZrgX_=f|hS{M}xxzCEtLqk|iEf
z)nu1=<~Mn)&vMx1<nl6MA(w}sico+_lVeZ)9{JeH@0;frpPzGoinVch+O@xr)9%+^
z%e!y98Fba?r-}VN-y>?@@3)_$pVQT5<@lxc(#=Gr2^>C|ED|g;_CNkKCGKc~-@F^I
z-sLhqD|@+gdXa<3#8ReXuYL>seOP01DDB%p{<;nCKcD6kZgffbrFfv|$f7$TzWbM+
zog;ekgF>0l>h1p?G+&TEC!W4NZtIT+>+RELaHuc|_dB>qC+Zn~bbj8^$-uJ9V!n#9
z{f~qEHRpv^7My(3@x}PG+$04-50*4Orir}gTrZx9ELvBxK;XrrnwamErY15!*t5P_
z&U67O5o&ZvSi(_KY1*@8{Sto`p+>N%ns}q<iK_-XxzAk`hKQtDFdaU5^}^D(a~^(p
z`UxtyOqJ>6&t*@u&$_BIbvh_b;Na<WDB7pE@FY8!r^0kusPRe7<xg9mErJ@{=CYvs
zbM2?e^PgOOP<`UsPpA_V1SM2hb}T#{@M3pzP;4CUf8VPHYhrk1nV&2My9eaTLjp5)
z#PAAlTOy+&=mBx4gfdgC(S3>C?=tkFA4li!y*j(lYoDH`*%FC&$+x&KZdvLtB5lW{
zW|}6d47!YIV%v_7CUvLo{CKr`eV3Qk{4ni<H?*Iwn6lCJR>sVY)8eW!E&Gn&OaC^3
zLj~lW3tbM^ZR4|>_l4wbILP+-ynX%R)z9bG*LA8+izqnAs+?N<_~nXvxw;<@Kc6+f
z-)nyF#;wEG_y7CKDi*<zoLwt5b(8+9htKW*SF)d7A%F7yp6hYdo<=)QX{}zf`JB}y
zd#%}`(_(%d+4=Nf*Mk*Hjkuvfw%9qL$RhrI%tqVtNuVR(Ku1tt4U7IN)Uzw?cJB78
zVOlHi$+dJHziRjY=lRb+-`83hni^$-4%>+T^GLk@&tv)fg~w%|DX)I=ecyN6`>9M3
zj}NlT>s&2bdD2M<?6U(a1YcY#J9X*9?E8PxPO46q$^U#Vq58_C@VH9V!h$Q^nYlaO
zY&tEZS~vxCy`kR5WrlOkbluL~9vf>pF?Egj<mlS3SJ&@+Hp}SUuGj0dpI&VhkMsEU
z_f5L}>Un3v=cOq^Vke=8qomL-Qi{*^%Z0;y);Tej@f-F|kFWcAD|`LYciC&6|NH%Z
z|EpE2|9zUi-=li>QL$*9T`v}Kc-Xr*pVYk`mA!V>yaaXm!h-Hyb+dD~-AwMct^09U
zzAGi_dVI-6*Z1}Re_xNzx1AcXpz+<_@Au5_*9bSq6rVM{9#{Q#mc^+ri$s>6*5Ci<
z5VyXJ>68g-pEvf`<{T9b@0nHq|M&gy_*&C_t;svLC?7g!`Fzga*p(Zzj%-{pE8%)<
zIV-p8ic_!G@Bg=Q`Me;5Xst&PbKR#Ub6k!~ndoXUf5)mGIRVi2d+EF#4?WfAs^mX%
zlg@RS@$JN2Eo0D_<fYzaOD7sN+wBKukr{;?50sp1KiCxfkn37-g@3-$tTh`BaphOv
zzCC4r^*hTly>E}A^L8%%62?{ie(!e90M}dZHk$nW4@#{SUVfLlB*HsBdK^igQ|Pu*
zlXd0%`ycztFJw)VVzoISv+w8Ge63{T8K>TCKHpa@dD?gOy0ROI?F;t5*>w8Po|nsJ
z|2YyOwfMo5tUG5|4X^DuV|04O!Ot;w4Z93KAD6GUnP;}|qwXK+#YZ&_Hy^wF<E4qf
zBCQ2w_bSt;PFq~;Y%%eOfW_<F?RUk#D16Ge>}&q@t@EPL{7=Pa4A~D$uI?`8xn^;t
z<D!3^I@9C!10Ll)Mw0{0b!<}-Unv=!*`9A-Ui@Tt^3utb=a0W`vPi0XGRgE>gmT%d
zPrgjn4_k!GZl+F8pI3RzXkP8N$k|)h{(smm@3#5twdl?5D&Iw_lmx$pmHi1?-!S>3
z@1DQkZlAPWYWGLxu^8z1w2F^M#ozyWe>~z;g|V$l@@XggGfrnF#Qb>B%zv-)`CQOl
zgR?L7n%~=D%DJ3#e*M3nOWatN_;cGlof5p^WR;Flyw0na%jX}vZhL4FXe2y&TFJ$>
zA9vr^xj$%5%w3+W$#3(4A&1}Ug~Fuyax?f2UU;?T*T3)k{SVpw`|<d7<EA;FqTzsv
zgF?&Fyw|-apVZutySC{@Qg>|4$D@0<O$S{^R@re%Z;v2H3h1C$IXNR<<_YC{U&mhQ
z`8e-|==vRxxXe@DSDvqqx-Y}mesDqO@*fknX|LTP6uMBV=HnUT^E{Df{hOq6)ql;J
zzNPH;3ZA*TYjXk*8*zbJCm-!~RyoSpzl{1=RHiqrdE(_$Oh=BY_@-&^)l@vJxp9}m
z1ID69o$5|PJxe?JbdT@;9{65UrQ2;v*_B(MD-4|Gw}K1PG(M)-?=_FRnB<NX{9kVW
zS5x%Pa=WjZ)rOHrQsX42oN?g+jUIUQGR<?j)5&Dn7I93^O}l3@Ux2!4VeB&5qs%cZ
z?{+?)_o~xQf6oV}6X)vkHl0-4#~J-ZT}kb-oN(}Cp>y9q*z?<d5t!$-Nyj!jTjBnl
zuaCOcZ8y&ob+Mg)|Np=568HS;J_#4eDgL+?o&PqMN%cZu-HP9xv-dcty?VVm(qq*U
z|8+a{>%A7~+HpPEwOU6VR300=6`Wym?s*??d7=eZQk1XPq4hOSwX=3?T&;L`r+xB`
zLq|5&u`fCB=xOiyxy9!!k65_<51IS<jPZH#eIHt9?a+HIX4}@X-Oz90E0yHhpQq#b
zq}H5!(Qo%_#eDf62iT`Y=UL8Se!Q|;!q0Nc;u&)Sv#P>Nu0>yWk=%Zb<$3pfkx6!W
z&mJV`?x@p$4mz-0z}zS8&cm1UkJm~%`D#iZ3T|7oRE;Zbsb|qXHl3fwf*vf(Y?&sn
ze9N<q{n6aATb(NvE#ItI+{bm~rCFiH^jx0=sriQ{*eqRr<mZXL6uULuoIc07i%mQJ
z3;s6X=Q@1W>~>EtPgR>-NMDNkI@UWpB0H`LI!Ev3VhcOnbg-+bV%^SXK|j82-+y<;
z=J|@*X20&#|NpyjmjKsmca<xxVo@iy#>7fK4S2rq(<$w*%J!U1hE=ihPgm4Gndn}X
z-um<FrF36#WpG)#QSY4lJ4N-YRhvH^Zd%wLtzmRizwTqV#M8_3|9!bze*di1VxBH_
z-K|%Gwkloxvf|U0rMt5$bf$zYb-4X==lR;c7tOwtN)K&28vIG}UbCQ)LFM<|_hr8%
z9)7uMy-B~5$DDVcy=q?wJyySe@MhZVx9;}80xMTJ<Q!cR{r;^}X5^3BOxO4<FZusJ
zjxScbp?mL{`m{5*``xw(XkY02^(;Fue43HU)}!aVdz-q0rcYd3W^Nie4N|g3aRj{F
z$-pIjY+>5Hqd#xd8Zdpa4S8&Lt-Gu8Ugh(LCbRSR?YuCz>R$T%T8Y2LPdO%XaHm*4
znzs7hebAokZ~AliB8zqQ{dlx_4u9Tpr%x{y_p9ZJRf{<bi+Rp5)YE;vec#u$9id^b
z`4<=0Rh#rCa0*}k{cd;v``C%5n#rEiWOyRB&e)?SVR@X>o5Q~47*A}dQCi~WW=_Lx
zPrAMz5%!NMILKNf`l9;#?fm@<_^+yd_nIeRBhzu>O$fi`6M;W+2hZ1iGoG@?$l>}6
z`#PsiuIX=iyiBXEi7BgmXwBQXH8e~OoaRLZJ7ReE?N;-blK8Fu#NhtnH(fj~eJ@o(
zhYz2f`p!^pZtb_5iphnY!TzkLIyPR^ve_irWcDh${*l|m)z=ps=RV8ce2`~@Sj|NT
zx0DYn=ii!t-d{=Wpq|0iuDr>|HvRaVIPGD}0SR@J$@2d`wEvXacFtY3@0^NhmG=D^
zh6gvAEPTNGtn2maEnJ86l5XAFvD7%x$MD7BZ&P=~%+lUbbwtY4@%&V$gN?^RvS;b2
zGYPkW>w=9)b%DLirghzhzaLxF{#aV*^!{xOi+7a!*P5?6uVt>^cz4}L&zFHg&fC+)
zF=TT72KGl+R;^waC0*}UyhyHkLdMnGiHnad^WT<i;j9~J|LFYof4^Qo7JK){B}B1k
zqTNf;$g>KIm9KUci21s-6!I@`JJEV6R$08G=y=+#>G5@zhEpsudiO{Puy0=Ju9IpN
zH}UG71Dx`Q|N7Yru3(+5Z&LH&WYdlGGw%5<e#OfssV<!5ko)T7=eu`yxJ`M+wdKf^
zT_)?!xSMX@QgkwEa;hb`T{E-cz>|V~3*WeJ=xsAq7VJG#yu$q8rIuFiWu{Z4<MR*u
z*E|y5J2mSA|NKuKJ)vn$mOrPsbH%)Pm3=gdQ99*}d+jT=yQw^vmN!4Mcbm_x7a?rW
z_;F?SBA<;fFGu_P9d)d&l23(XO=GUclMic>+h_dhi#)Dh|I^zqvvkMvjo;_kf?KB(
zIDGP1c3IqSd734`dL(8;?1n(jy64vSHMeOkexW`as^*Ld%dQ-2N#RXPKG-%aDEKn%
z%=y3ar%y7@iSyG0`^P0ALGZ<-pKN893^&GQPiLH8;HM9j*dv(Ga!_W|?V=>E;(Z_6
z^nXLU2Q!K|{;2!?celS~`&KVEz5H~|EKscoYXAGVvZzHqo802ROQ!FYl7CVE?DBh+
z%auBWjnk*k+VkBd_F?cvYo+^+bzRQ3Z<oqnm%4aeX`hc|Yc$97sh^CwnAW-7nG@8%
z?4d)DTyJ?J`|=s<7IBGOKl4$}5N!ELrv#%^J4KV7$?hUTj{7U3{l&O~lVTKiX0CIc
z`S8O%>qO3<Hfv*rsx#Hrw3+Wcc}0F>#gB95M)@8_p_etd;~sxeF7uaPermn_qgiF&
zHF@UE;LJ3*+8bo&%d(7}X|md9!GN4=_y4{tSB~Ih|19*ks7k?FZgt0m{7oCVv!cHH
zAINGsxo*Cn*UfCve_MFYav$9?H-Cx2l*AoZv`>ht=J{O_$ePIMqxCIXR(ruR`M}6?
zN3VuOpPhfRAWF9Ih+q?Q2m6$N{|+~5X9_j0>~;`;E`2?QmnpexvZQ9>!SXj7kNd?6
zsOLQU(KzAe-Z}4=&CXl&cFOI>`2Q`w%c>O=lUoW_XM%2ZZoK2-^K_z<hE9KhU8K<|
z|C?f}I$xe&6hEgf6Z^7q8tWsqgHf+mJgM1yM2l0XvC-9G8rv&Xy-L|?5Ao;1S8k-+
zOjBPj+dJ(IqiT4=iVL6~!}q)8@rQ37XPv1c2s-PfNG-tGa@*sHTx%x_9`;!zf6(};
zgwXn%S0_&1Ey9)L&;9bHmSE!&Q8QDc;Fpa;KGW=s7bZ?VyMSfaltzz3(MKkVPZv=N
zT-$KPC1~QM6f^a+9V=xgZdz=9TRSgh)g^@sZcck5#5`X*NZroeewV{aEaQ0iWsbUK
z!aeiXt)IhuN~M}RL#1iK2a|xxtfY0`tWR<Z>?V5X#-~O7R5g$=xp>58>E77tw_6p}
zT-h3VI37*B_V9uNbNxXlRaYgpPJZ>-Ei8vrC$Dw?a`i%4)6IltUM2mSnL+uE{6ZYu
z^;})EKAURX{hau4bI0a?^|vGX_RBNY-nm-YZ&Y+7cCN}-!TrCV&CdUK`~JUe-|tns
zGn6zbRQ+49*ZBH0zG=5E&%g6uR;6B*sq?@U2ZbWjGfD2Ryzjhwc;whRcA)~v7NrSO
zy>@R9nEO$9eh16zi0NziHeP*nuXRVDz`dw^p>u+r8NSM~7f)%guX!@jeO2IwwaOMh
z9yBjrxTIv_obzt^++~-J2d5@<EKh5V*vGoYFX6REVS_g4qU=jMxSwT)aYQ(?>|JaB
zD1P_yEwNQu0XC-d;)+hHb{xI??QQeXNfiPQ+#DZR7cp9KO__C#sj-u>W5VTMf;*R8
zNC-45aXiqx;L7`bzu##TMsDUZ+j&}V_mN<xMea<CW^podXP2v8unbKRNVs%>@pPB=
zItl-u$1{|2YXx>J_!=z5HCMcYLE8Uls_9Y<^L>SOYpzr})v6p3s9Cq{;GV6m5wSM|
z(|#=W|2)0zLj1&S|0-nkO(u32KKjj3`RZt8uvWkIJB#PK3w;HTpE)hxwtQN{*GFGu
zm7gu%sBCaac5#oztG1=>IzF*1Z9l)YJAbpUbxgESzREmt#hU=X7fUxC{P|{;#kcwY
z|D>}>%${&zo7wHch3mNw{uB`KS|rzek$GC%&((>}8U2SHA7`Fm%v<<qxwp}8owqW9
z4!#9;IS(4y<vv_iDU9En$nb>Cbkf_Idvtg&-qFicSN(hB{EZIN{)Nq-3wKSAobXpG
z(pQar@58^2?{9AvOH+`oS(&LTa!l>2d|Y%!Nr&g_JpvUihSIFf>ixf7MnCE{$}6zz
z5d9~+GOzIai)p_mW@vAB{`19z<=umt!sQyW3NHj2I)1DxzGMEv+Cu)K@KfXSHqAyB
z^Q9VJJbHZ8^VGvPO5BAdrl<2ip5ojc#LuRB%(E)(rEq<0g87{4eHC3>qqpXC9IQI{
z=*8o&o~?mwi=G#RXxVQzkmKknIj&JGQF{22v)D2{ro%PA4=c`=x$W0ww34T5)#ZCv
zei!PB?EG><RmC9x_09D8Z&kHTErjPr|2g6=y5iX(#s(R;Pm3>e2mjuD&WbzuO1;4C
zM{j#f76l!ecJG9<qTUP@P2at*<&EYyZQGWX6EX9J+gy*IlG3}RgqrUeJigU;(I;kB
z(%vl#%CdItc+l~}m-G6T+tW%uXzjKanJ08moZEkuU*(i8A){ND5-$~iE{sYve{y@x
z1I@<?FXw#Ptg?CO9@EbbHyWMqMft2yvH!H?+L2dxyf2Er_`<g6algG>`Fd;p#U@<m
zSh603y<+_FU-JI;m*KsB`dt&noBEj8G<VHgDP-H+BjF`?^rX`Eg2iito9tCyg}L+g
zA8<PFbG%Ph%<Gw1pZ-Mtogv_nh-Jdy&dzo}Yj2HNR&510gXgh@7dV9{o2$!Bb$|Cc
z$vE{_pw`{{8rRJe%d$+LFw6~T736;5#J!hk=fSN3D__3#o&4#_zLnbc@|8~nS#!49
z1bvv?qH?c%<(gpS%<hSkt?wEarl|N`JhW3v=g4NA2d5@&XrIe1_DE>;b>4<G&L%u5
zi<}=@$+$#)Td`6wj5$R-jVmoLQ1K<(()=7Fqc3cYqDL(iI5r4W1sZLaYm%MT*6Z|P
z&jpF&%D2vKD7bj7(n+k^O>fF%mk-xUyi3~`8nma(`Sd8zKsSGL_{IFpJ`RyRONGBT
zt+P4p&#KOFKO%qi{S@DsPhAuQXDs3<Dctw*^i_wy67BE!uDs&iVmE1i(J9TXAKY(B
z{I1@6+I{=N1=&1`f;)5Xnuo^yJ>eFwet_FAQh1BkieHY-yH;sx`hC1uA@cQLCpYio
zx*~PALzayf*)ua;j<r}=ea$bh>vDgv<FU?M_k&O4&&Dna;(aH?&S7eLs7`FcURir-
z{S|HD&wEuDZ+CknqMNihy8jh_?aF!4GrQW(IoqY3SkmFN<6HL%Yt5%Kw0M^ndM{-<
zc5~Jy!TY&;x_5<6Nq%^-`2FQIo-9I*nGFu&&lkHV&3I?x8+`r#(p$AzJ9Ay;CQE!Z
zbN_C!=vi6)oKBl7Z|}z3a43JiuwV7iwUlk)#%s#&x#j1*?2wu==XU;n+s@qcw%>D7
zFO|;kHF~NwKby;cmDBdHtv^Jk<eiq<9CSWM^!TPf7k-)j(y><-v2a{<MdOP7+Fe_V
z*Q}ns-oo4V@0ZJ4j^DjwIO*3l<!i<(Pcs^&ZrXimSKSIDTjR|Se`)HPYsbfZbB*<3
zalf#4!mf5ukM+P5=Y*nnKQs8=9$L)Yz2mV<yxrbIk>%SfdgM52=7@2rHhY>0f^N-N
zDVf%IiC=%`yIrq4te18QH_zwIO{|n+O_KMO{m$tW(SF%;ap+Iahn=%Gxi8(j#9H_3
zQU}YG|9ABCcnKbTxBI<aw!Vq8=#AwUemq>sBir0I#pj65mn%C#SIb4rbW=6R)GYkM
zC+5d-Y|(|sUzV=E<}~BBi2dZSSu0GG-26^QT`urWNSCg<Yf(I<b;Ey^Gk2@s?`@5%
z>@~f{v3c<do@H^`+qnzHKtr$}1UD?UI#>37_2k1p-|Rp4&qr0A)n%L3m(q9B4tX5;
zE*72h@k+3N>%)H?%Uo8!4C)fqc<ff2Red{m`@~}!D!U8Q?{;}^*u?0{Y3X+8nBd`%
zt<_R*Ebgq{sI6$jX?oM!r@!mB5N~8))vr6n=QSti^I5%E5ckN9H}23Amd96Ek{^^X
zYCnon+?*m_ofFrvY|<t{Rm17k4hs#%>MwmtbZwgaQD0?F>(PLPU(BOU80fxlI?om;
z=i>NSJtAV?6t$%ezm|v}eX_><%4r*mse;E(D!XjAe!u5(Wc!lt88v(EmfdcB!Y-p{
z!g4U_<>K`G$;Y;KG^DQ4;bCIxU3^BtNAtNsBF7QIZ3~@V%g$9)ytu+5^t;fCmzT=6
znZ58<P4@WK`0b#<p*P<155I3&s-*4mdvWpJxq`=wI*&eClDNX?4Btl~A^zsNW#%C(
zW!N}mQaa*zQ|>D6uFO`~xwcJLWWyUHKf9k#eg&<~3!QE#z3=uP!>FZkJX7bYNS=P0
zdZsIBg6Io_o!0+S6c<&tS{yx5d|b9X#l2Z`7V}&4P~CqA-PEkoWt*$7>hR89IenSv
z7RMt>E8|*?Uc8cGXZjUA<*8QS7lw6D`&dJEn5ItI>Gq&f{m=Kqw_R>J{L!2$Xwg(5
z>l0eru_vd-<H@_Wxn@l>N?r%Pyt%RYx$}u-dy`YoE>RX(Z|Y|`*>2{X11!n{mt5oX
zl+@Zk-&FF{HG6dEr^Iye*-RIsf2dzM?JPgtaIJ&z9-H3VD-{-82?}0)x<hkI)6uNM
z=kk6lYd2hS=wUd$Rc5tcZg#f#AzuFWTM84}4}h*alQ`{{J6rAfLuVG*@{p}=6Z`K5
z7`s=#Uc23F59<&0DV}Cx+4=ecl`hMcWUtK)kY>4dI<l8%mFUeAK3?Z%EREc#v$j1U
z+j)|<=@JwDrVtiMvuT2H%NHCmZw}>S?0(>v7PL-sL$b>b9=H4-Tx^ent@@ds`EHI1
zPYO99)$@PJWWP-ljDF6Fi?n4IdZc!9O4zmejgR9BGW>6<S|&`4*_$zGj#SK@MSH5A
zi=LUf%Gm7k3$fE*ggV<k8t-T4ZM-4&YX3UN*@@@U{|I#OZkW?~+(cmRjez+Ne=xh<
zei%G$V@~?FQy+Nsry4JJP+4?gamwoHZ&vkKgkJXvlMF7A4tG1wbE9E}j;V0ugVv_Z
zz!IMW3w|sLzRtp9a=2aMXrEw5pxKT0cZ$y!Fr+{Gbhu-$;MdHOfIsgg>wW$dKk4<1
zy%N)+9<^rT_IZbND?W1jc-0=~j#fEYrK)4i5m$OPWh&=cmdHaUl2f+0v^`d^ka7R|
zafZQp-i>K#EAPnvzEFO*RQ%<HZN+b5Q~4jaIB>k%T6)!jcgkHp1tx#HpGzXIcCEYU
zzo|KK*Z+dh$Ge-=<a*z+xIcVm)HCt9&hPntEGBdAiOJ6OP-5P#*C(koUv%x_-H)v9
z+&bV_-6NcAqO$Lxg!Rd1haD42tlQ=(3o*@oe&hv1+vaIYW=gQ^_-XW_c=wSR>#AL@
z%dfMkDW6b!vgZHED+YEurJ4d~&#{pGT6^z><c}+vFTHy1+8mznD=Fu~GXKfv&P-Y%
z?&y`?v~cm<DJhmg-!C{;7hbNj`)4$hqu_d?Ox$v@ncuk2vl{6?XFj$nZ-w@Mri%qd
zrapb)&NG#&TW;-JaK-;UlX9P9S~Y9UGesZ2G^X2|pG2$?PK~S+V-sdP_{evSsov`Z
zKa2FM;w(2Vzg%vv^6-V#{FHO%yFAlZgX-6f94-o9-M$=@>^^oT(!#j3J9Y8B>8k3_
zUvu}|^aRZU3NXFno?#8T%I4_ij3%~oiE8|e-x`+GRQZ{e*Uoe;+PCk(jJYaIod;MH
z59|qcVOpj)<30=T%)>rrOqX_jTr0$s`s3!EUCZwr{dq@&>r7z#+E?$i=R<~t7&%<F
zSbjSuY!Jq0_>!X|K=Il1Op`-Sy{SpDbNuJ1EB}-`&2ZBU+>!sMq_9l;^bYC#J%LZY
z{QEX%FRubqljElcDOV$pyURSb*==!OV)wac;OVCxcBVxyIHgZVdz*az_`?3Tj0}s0
zWxv%cjn5O;ZZ`GIdLPmnE3){0`TNB|m*vliZ+oyT;@4Tow34%;L;cf>2IiZt`MEq2
zO+N9pU*Y{)mPkum0RbPcG||H7S1nq1y^F<9$gckFw!wam|1-wmmuJIn+3p7q9~cWa
zp0ErJO6g&0>92XB47xVq2lGDni#D@;I9XbBCK#R#aow}(@yq_&Pk$fQOjK`t`0|GO
z6wpw}fi3P2riRCDJZ53oU$s%F<)o6*x!r#v7r#@H=F?&7Gv)Dp@^r$LrTykL8}I!q
zu>06jBL2{YUF`Ur2&D-eJRD5x63?W}t9&MTx~7G}=I@uwUlrVM2>aU@PFuI>ErXIm
z;r5&)K68<Iwe#cZE$%<+)_?ayZBpF7zhD3IrnYw;FmZmc^?KaoU(PmP+{%sx>I8df
zF4OzODa@@j<Ew_%Nj2t<2A`Qm$(t2+zS(s8*b3>4*jVxBlePOcyxhGtHTLDQs=KAv
zLnrGAnCe9Ou?RKBv9KhhJlynZ)oP=_m$g=xq-LI5l2P~NqPzL64CRM^+;7>uWEWsu
z_;TBgB<@@1lKU*(lA0%;o6uu%zvgqPcUSBj|3z&379IAvzTsh;^sM<Jd6GdHOWtd~
z6mFDkJ-{j+ldv~??bfjPu(KQelf0L)Nod*c-(v8dd)4F{oZH%8K2Xk?4!Sxd@Qll$
z&(G)A-z#^&t#|JF#Wmk2N}oub9{cU6csyt(rD}c}WQ<Axbb@&P&ZlBx&*T5T3jfW@
zVSOHSn*aRp*wU%WKR{Em>*sBr&*|1M>GH%E@9Y2Xw(p<pZ&&%p?Y;DKVP~^FUMH8I
zxA}bL$61SNkoPsa818@m`~Cj?C*`+u%_CR--oX{#w*7wH?w+XYvE_4J%=Gtu36i%y
zzfO&TahES2&%12rTZ{9K1Ru-4v#-!Dc*%sjC6|5g8t?vm`u4Mr)#*2jl?+@3Exz4I
zj(lzS@=kKU?XP_;OQ%J3E$ZR^Z2SGr<}a7L-L1Co>z^ds$IdjRHG9p*V~Opu%Zi1j
z?7qEmgRM`9{oO*LUnhU^s7?Gn$y@K_p~+`OJ4<a0PT7Gb?6s60n7y0e%(qdVbC1%W
z8x=Z<7hT1d`fhFSTBSJi;<np)zpwBAS8Bs{%I<e-L-ozcepV;Dx!KvCPrZ2e?bXkm
z*ABm2x-MtRZj1Y;_4n7DvwR*?cvQ64OxFIcneqE4jgFr;9OBaEFuvtDrytb#YEy8i
z{JK=X_M}<+&#&4?qqIxj*S@dL-}O?hT(4-S;nK*Ag{>z9{{J{`AJw_JNJuc@{%f8i
zdeg0+N$2lzJg+HmS3}3LdU`|g$}iI|{C-iNopG=Imy5%mR`IwBji!aK;wv7qPMUAH
z;?rNweLtVgb`DId1(jj>EdRbNx6j@C_1ebA0qMRsl6H!(-*zj@@ZFu_^SbvoHveep
zoyT0N@IY+Zz3N6*v5xJ^YR}J_-@oH%f64vJy{dm#*VkRW_@UlvUSVJ5bL;z_){dnY
z9IyT2;4gnJ&J7wJWW6m`x@D@7HYh-&loDQBK4@TmHmBI{Hv{LJJH_Yc{zwJgWvIPw
zM^cW<zVNup)QOHUEVu5c)fpLAx(MC-`~AMZfYL|zHs<xtj}$c%e*e?EaiSyh_>t|*
zAGTM9{nM%977?-k_p$%H#p9m2<@a`ef2orlWAT3X`+fX29~=TM*(AI#St$56XWH4x
zpv4HwWSAE5a6f##$=+$o%>B7fjtKidiLr^y-uZgn?sNA4YwY-4-)4T^`u5ia?};3&
zQ)aH&>HE0w=rzFwSJ%!yFwO4h)gTsTx#~Y+pj~=-)>8^umN#oxiA=9Kv}41xm&@K%
zc)fkM>gN3G7JACNK?$DG`GK?iuM6vTzpMIwx4b-uPxxntQkUo<z1?p>(~7(jA;w8o
z-8!32@L9hp_<A+`v*G*`-|tyn(@C)_deOwK_u}3=cl%!#f4yFBKka*R#lc@1e{1J(
z9lsR1&6fo<_IKbd+uptfT8BAnEFxDg;QGB@=99|KCs)JcbvHdoIh!8UclyD^cGb#R
z+xjbfAKO}-p2Paw_#)`qj8&7TpHc3&F*+_&%yaHau^PB~0M%6*0xv#gb-delzpgs*
z<Q++|wSP6*P9mg#C@N$hZ)BJA@VA_!n5O&rWoxYg^V*LdqK0!c)gXmNr-RakMh4+k
zd%xeC9Tud)JnJgosR|RFbLOYxz^hikOGRdgur$3j6yE#kl(y<>sdF;->|_=#0rdvK
zQ<!YLjBH+K0xVdQs-Mp-zh`iBUg()$^VXNHw0^ha@z(2cw|{AX5)>#xg&IG)J!q58
zd$6GCZD3K+f!k@bbKC46Wmw4HFZ*ay7#?<7$6ndv)`xbVQ(&V(S*?fT&$sRSr#6N)
ztpaTXecWp<7nr|z=X2q9H;o(4{I*ll<9N3Hc+}1P{-k>S+|p|++r_Oq<g;UiQ`h|l
z<>lD=$Gzs$0`yDwwz!LhUH{<vR6OFkOJo_xLI2mkve)f=w(+>!>g+{(PO8nGksDk8
z^XYW^uUCRgeWxs~&^!2a(X&O}dRbcwMP~+7o!P~1s2p`y?B=OYWvetnH>Q4k_-d9$
z@y^=&zwd=#pI>$>bN$|LQL)0Qmhn<I&$Zas|NVNsTJxx5e#>f$cRL<Sy!#f^RcvuP
zXY<vspnDU-<FC8ijCYFFd~s0FT_*6l%g?v@_133X@0#a7^+Ro`@06owgnunx_q^F>
zYW#JVnGdhdt9}=Gz58tatCh>InFfmOn-*RgtG}zII2vSS$&cIDz8>Q7atgZcGV^iN
z;~6jiU*Oy_z3AW1=l6?F>%OiEOP(HE78xs?I{mZioA@g>|9(84Q(vw5_wva@QP*#C
z%<+0XOM^Rk$Gxi8SHG@dm^1zLEDinbcZzyLUNY6*t$Mw7?-cE|TQ0p?y*}<bXk$_N
zokI6m;Z(y8`_D7aU-^3ICbwQFC|p~uF3s?=%z3}}`@OhRpxu85D`WKqk2v$&hFo`v
zTr;6VF-ES?x9-Qo_S5Hs3Z~>&?CAcfB;>28(F&SpZI1f6vf$;?>D%0%-_KgTR%&9l
z-kuMK?EgIEFTJhN{q<r;j%&HPh@ZY{ZS>(AAI=z`e=_NYw<c$jvU=N=hihk^`hF{W
zy<1wdu%E@ktH%rX|NWM|<0+@dxApaZSC@Zj=CfMiqS?Lt(Xo%O@&Yaefi_!~E)CFM
z6no7yaNAMSNzpgOoUTawD$EG`|LtK&)Y^KFIV&xGHZHhWu%bpd&Y}5|-u>Mnrg8Pn
zpR(hBM`<s$-^aG}!Tq&U{(X_2!rK-b_49akz%+>+t!iBV-bzmi3N^`iwA^>O<IW>O
z-dloheUh58h-*vGtpe#Oe>$QybHDR?&0G_$`8U*5BYl4Dx3E)gHS-s)>H4_Yt=(C*
z@W@iBDNlQ@&S1EAd4Y_Uwo%`u1yAPRdZAnQNSN14{ZyFO?)v6UrLV7Fn|E(P$(8K4
zm;LT8C~3%Ey5+x_hV1Dm&9|#~z1r7?c~z%2UlLq3^%CEW1tkVqOW*Vcy$!hPRr%-R
zar0j}Z%UR<t?1(QVn6k1i{1khUa#XfXDyiWaVOi-!zGnb;srO^mM*UG4LDU%(_C~l
zG+fvJb(64LbJ5JyX)8IsZvI^6sCm@CGD>{TI;kmlHf1*jm2Jxo;1ex6eR7jy<hTEq
zqD86>r)~SB&i=@)-AKk{SJ(WC;^rb9d-wc@P6`EHpEh0=JmlfT>v*c<@8ROd?_=GZ
zA2>e{IDUKo^i7H%6a~DNef?|7VmIZ<{cHO>?^WjQ`}u5beL8QW|NJMnuI<0uU-xBk
z@5y4l%)j+7o_bop-4eX|%ysS)=KDVOhRs|*z1s87MXk?Y_wiY7icZ}ZXZJkgn$~C2
zB9C9cr$z5<Y@7e@i1gG$-?s)mn#k+*KX&aDLEfEe^Rn0NjMA(Qp7!H@-cps%S7o)j
z4_)=@d~c@lH+5Ukq@Pk#_zc%h*~@k_$!;r%TWjUCHOTG7uUQd~u7!CS#rrOM!*JcJ
z^R>A~cx39-uv2XZmQS5=bZ>p^D`}?rQvV(I*53xR+&|p=uk&?pVakWi*Mw8v$2nSF
zsnQayi8%k2-8|R!<+pbsQ=Hc>f8KUrdGVe-UtR7A{0I<|j!NCvcA))|=c3SO*&H<*
zoVsg`;+-Ejzm&PG^%)dQQ=BWNI{kf_+HTf$&0eM6c$RtD>|Lt`r@AkGD|3;-6TDmo
zG%?5{!nAl}f#eF-Cn-XZ1%*P5f{GL7<hvB87J^qIbRL-Eu3&k8N`33j253P{hJpwG
a8P`33^G(*+^BMyK1B0ilpUXO@geCxvK+etp

literal 32228
zcmeAS@N?(olHy`uVBq!ia0y~yU<zhnVBE{W#=yW}`22G_14Hd<PZ!6Kinup(S$F8V
zPu(Ui)D+$1U&Qa&WSqe<Nrofu;M_aj9!xy%{@qvE!P9Jgr`wvxtRjy?uUVLlbLT<Z
z9m4;rB4g99ue<x(qx8Go-s<}IpVvH}vu@S8yPI~sejR%CYMK*=Vv7K2M2F*M{|(V>
z)=D?ZZr-`IxB0H=jelxkj}INpOZ@a*aYjvD?vwY24S!biTjp>0Fa9R`;6F|2Ie!gq
z6rX9`WMCb<;Wzv59$z`_n+861_D=uoY9{Z`H>zf_R=bg(TN32Q`>iiQf2*ge&TRQ=
z#|`G}zZ-kcy^U|OwY?#kTevfF!~0t=X8h5+vD@dL9n0kN{z~=o>rURUJ~XM<e&rtH
z8wU@bn=nQ2@B^-cWo*A!|J->`H0S-615F=hB>Xq9XN%`5%h+Ih+^b_@!1K@5OIgJj
ze*S)>P=8<Ln=wCk(FK35vI7;{Bs9XgzuA8N+&97Qb;Y*@m0Nz+{kOewxM^;lO*GGx
zE6+0%#GWPw{@XW0XVw0EuIJ|B0Wxp+S*@Q}Y?H7M=lCZ3`LlodJ0*v3cK;V9IyQ5=
zk3MQL%SxCx?J2L|&)-R#Gyd(%+;i=ZLcP|Fo11JlO2jDL&_DC{!3UlE41*tLeUcA3
zV|CQR&EGR6`kb7={_@m)R-LZ>`BK+S_n)Yf&+(q7F4lc}_HQPA8G*G@THlTD`ycCB
zu{wF<&CGY)=g(bgZ}}7JQg-XZ-`dL+tA8G>&6%etCKP9NK)&zXrS>D~XTN_8t89y~
z(93_v-6Jlm^{HGilBNE>$T`tB{s;f1-CM48#5A{LBk#x1TimSHXKP;Z?>(xbu+Poz
zNGIRZ_e$v}{_V?*x&LQDoymCt?z5fG>I$#gBr_OXHvawXo{h7PPQKyyPU~lq%ntwb
zirU`1pMA{s4F9XspLsT3*mLfV{<}HpJPnJ_e1Gd&*%lEoFZ-SF_p`=q4S!;v@Z7v_
z&U?(WFrfde@e!xbEXDg1n)WXgteqG6ua%*OaehV2JVS;jPzQW?Ys;rq-`c@*Q@?Fb
z{-Pem-$y2V_no3R{{@3=NZ|1af0Iu9Ry!rJ$)Bh4zKi6iPQD9{qNSsX@rOo3rQM3@
z{M$@#xPIzkdy^ffUN1hg@>}=T8P{vAXYb69Q$L?^;%<}f^G@z#dww0Mn(@4LcjwOY
z+fSOFeQi@aZ==JR&Hi7eRoboSH@aV@w`1Fmsg-lLpEDNgftqnUY2V%TCyk{(9Z$$V
zDJ<So`L*+_!TI0cJ|_G>AuRo===0;kWj4u;Gt%e3(EVH!pt@{V^1Q!Ey3b1Qt1R1<
z99NrUd*<^#PoL7mHosl+XFhM%GQ2AL`HgtC;ricdAm%IK*NOAK`zEcDes)v6>a>mg
z6)VH9lM-$>%SyayKIq3+mb4)`_tT@qHCMTtYqf899-Jrp=3SWnvOK9b&d<Kqs^2)B
zQxkJl@n9LNwVG%1>adsY=`U;3p-y;p<cjQ>zuEc4@yE<RUNmRj?GnUyPT<YIga5AY
zIrF>u@9wMJD^-i-|8J|j$<MyKa8muZg!f$E><^xsxum%F<L&pHW$RS-zCW|te@%Pk
zz3nkRbFbgIpD*!dMMAv6_w)X<;<+ua^L@+vY!uH|w&ZH}O4}yeGj3mNRc|bAemiqY
z<(i-Q%da!uS?{$!aN50eB=7x9xNnfJa4%19O@@ue_diQyde47v`fGQixXD)S#`UJZ
zm(-44X<oJEJWF-K26dL-fg9$t++MF$Y*qZRY*sJZ=SvT2jBlJ@d)M*P<^-rmx7}DW
zqsn9Ix231%iM_e{jQ7I#=DDRISK<=mrX6d|etz~~o$Q-@GreVcvTwwn)%|{wz|Zq7
zu=xHm#_l_Z_t?oLD4kz(>BMaHXIIs)9kzKNt+z}sMY-Po>K{g?`TsMg&f}705#M)~
z`Pkz#wUbL9+1hn!${E=u{IgzfD(<e&`z`!4d&Z}Shn`($y&hT<V6i~!_^&ODpASJ}
z+{)KyV%c<+-sOCH8hT4O#1)?AO<3}An@CgF7ma1xX2k2f-+IxN^*77!sXv7)RN`{}
zi1kYJit}DLJJ|?i#?h!VulGId&MBWe<5#})_w&m`UVgl#z;yIe-&CP<lUIES@sZ8f
z+oU&Z$;W4xt(*R8-e8^dW&6zEKT6N8SCv|FUG?YZL(p_TTW#5n>BangF$=x?*Y~cU
zZU5!n)FrW+H{#Enb~8N6%yil%H~3x1()-q)yK|?u-FTEZv-)p;?w?sR*kN&(nbg&$
zQ1A61({^vezw<E`PmgeGcdcv>JI%g!iiD&2iTllSl|6UQt^E0T)&BoepPUtjru1z`
z)uekgmK7a5Cv5dHdiB${SB-l8PVGEfYtgmxM#SEVrAIhege%=uGEGA4P2Oug@0^==
z@JrPUacF$pHZlDAOGWq4VO>F`1>LiY{AE+uJA5~(+^G8?uGhj_-c6~)@KbQv410$+
zM)$XUX{?EZWyj^6nk_+mv4T@<yMG3k-P<a-v#oNc#<B{g#F=f6o}AgiaB_E2sM)5?
zUd#LK-cFyd{_LjywWPT3x!pg@u6M4~R?plJvs6A#cQfB<m(1dQ*^9FkxzuW`zkinx
zsdOm~me6*x;^j_ec|H+ZDwz5DTuhzbydphE{(@RS-fNBOR`prWk5(Hub>3^gbAO}#
z?EAliD<U4unz>E(^8rNq5WIKR^XL`Hv-%gzd!`m}FbC^(t(<$+DA|3R|3;HLGROTk
z^6_5U=XuY@@O<4yvCq5mqt9&L?dNl`clxH7rS68U>*uF^^}oMof#A^)5x1Vzr~cYR
zRqbWmf6yx8ma36IEdOREX^DTC(6usp*7V)CHL6>W&wlP|c=STgU9*?ZFUkc@?KYUB
z+_(Ey>SxQJ&!KLM(puKAy!U+1rtlrDpnwwy)OmhU-J-wuRkuOJ^iMV$4LIV&qyNvi
zUJEKGRg<*(1ouSf1T^Mf(v>@U<>{HzS53oA?4F;#RKfUo`$4<3cdz$cC_K4Q=<`8n
zn$#8zzO!Dlz2MYB?%8`Zmc4L1vDkm&pQ8(ZY&dh{VET;fpFe;ULtS&r#b@t<KT1z0
zn`q{n=!+k{;&|2QZmp)v_KMnX{AN|Ae>Oc6m+a*dmtB8rlV1JvUZf&iHK{B7NMzc9
zO0Sxx{v@u#(yq$GBKIS@j;R0I9e&znr(I$luYudsM_2#bEGeixBm5asF0T#oDg1vo
z*Y1h(+<Og<k9j1P_h~Gf7pGgSyW~y2oy?P3&#0ORH;0eX$7hzaUbD^lKC|*$=RKqI
z>{~l$JX)l(=d>_qes0X=m;}W(jp=^92WAH?2|QkUhF#`UnSACMTc(&9@h0^~&ogB+
zX4vKKy7BASo_Dv!KEKry4E_-Ix$UJyasB?~mF2AeX7<VNSzI|?q4-^aY=D^k^2CQz
zzgWEMx_<U2?=tSr5B8aVj@cNlx7#T9tn$9sQIQYNWb4j+VQZgJo~Zoqv8`R#%Fnac
zd$)*gY}s#|e%13?{=e9@TIG_TOYKt^zbkt8_0H%0Qg6<L-m)<`Z+9{J+0O-Lg24xl
z9z5!5bu8FN=KsVl&3QR5wmnYjPMz7T{?@zyuEuxoOAjtR>lGD`{?&4&94XsO@6!C!
zzw_D$_fHut@3Q}?#r-p4<GpB}`{X@`=Ch@X9x_Tvu)V#scY0a!XZ!0SPx*Z={XPdx
z3)xD`8rYfTW-OLB{;u%7=%0T)Uy_u-vYiL>lOyka+ZQ)gUVfTj@bp!el=uzw?fvuf
z&TsbrA|@MBzqi_ZUIcTjxc9bwn?9cdH>0-6RW!wI`p9X!-0|_&6g&3Y6C}aSs%zZ(
zoKGMBeU)(H*rZ-LskW|~Ma(4uuCmVGx)tBg)<`~KXZWO6IQ4|Ol>>X|6~mPM5})>U
z8nwL<cD`pG*fy#dEi~k^>mV)PX8Q-?KaOAMu&ei(rzj>?<M>ZctX1*%?`4JjpPPSu
zvT@ci`N3S*SkKc7Z$^J$tzo(^A@!+zqH#+7eUo+hbpiIX(mPt&>JR9D=wDdK{kgU}
zA8Z)o`~&7~ZP3<sZ!zod;0^m(cPC6!7Zd)&09Cy(VL9vX+zqyyZ8lasVE(~m*O<@4
z`*XL4<gWesTKT45=XVJf_OR9&hS+8A>)(FL_{gC~{s-JYjs#5H-+e;q$M0o@v7eiN
z|4xYBWV11%;NM5)KaBe&K;4w%`mENm)6~V-&)M2F=sys4cKxT95csDyxN_}J#(yCj
zHm6o589vxk4APQ#VvgpG^0VK+CPvPSkg@pwgJEBTKac3o-3_iEzxzJ@bI{Mo{(?ML
z*}0E7h6NV)=aiqbuWPX9nLC^HxBG_u?A9iyg%7tEpWAty{X^?Q<@)#>v9tHhr~YmF
zd%=zO731TZNec?*EuQe~&tvxPLyOOTpFNw$Klc@rq}`n4bEePd6!-J%aDDoIG<o9R
z+Tcj8Z<3#z`}V?mmY?~bPyT%BXYcb_pZ86f5-B2={BO%K!vq^f|AMO5nH2?omfH)@
za6F$=yg%c_eR1ZW)%-UO*R+Mi&F|rTyYk>Y!8c0}Zae+#nnnE__jAwfwIACQ@3Va0
z&6W7xVj0Wt*bU_~|I~6EvwRX$<o7>P@_E(7-BwTDA2tQ`rPu%5UcE1E@nj|GzQQ=m
z{d3YEG8?_me>^er=7yf))0WpBMF0G)G~>_S_BX<BrXB3t0PaCQ{!=JtIX}rt+-$yV
z#6P?L5TjjfKYf2V<4-KHUH^`&J1gcX9BH_iQkQgb!g=nChBX48T3)ReaAvo!ubZcF
z<iNq_rTyZIocRpqH_hMg#3TD>PXGLO^V2yD13r}2#C@+&w0bi0SyI=2C!V=4-s_(<
zZjsQ?H@f?{^@YfxeY?-<ZwX$Ns@UfB>U}-mw(Ak=1P(LH|6jcG{ChKyl9n4s#eEYK
z3{#5g>(5kd6KH0Zziu1X(bpL(cTt2*%vtUGclnw3ii8fc-v77orJTOY@+Gly7x#rP
z<u){V^+(oPM^pL8gag})>+k38I?jDjt8%)#No(U#fzE&bzQ4-fqQ9)QMMB_5SzSHI
z)M+Po+bq)L(P=wjQh$8i^eMtg91kx4Fbh_C@^4-B9sym)8PoOl{!uzy7x0wZP-I!@
zN7w4_UY?3=jCON>S8lnxta^{YT7$LS`m+ulNb*?o^!=<O!91N7p6TzNgX}h1U0RvI
z#oVp9aoYZ5pB#-N0v(e})}?`@nA}-!$K5P^CN5r|EcMfgr*-Qe+3Fu&nTpS(Yb?LM
zwVDrh-Ou`G;!^dk5>}teH)fo8-a3PE*M%cWiuK~%Z8dQnk_)(fb%aKxMpS69v)_*U
zE1LA;#Qmd-6z7YRHk60A1UM&m@Y@+-?Yz`~p>HzFb_QNF%*&sg8hpDxq@-u}jg*^L
zzDbM9Ca?CL=9@b|@S<VghLf9C#ukK>^!x@(N{PxoW;Ba5+gjY$)%P)Aw$AKZk#4TX
zE2j8P^L^X1cu}&=J+;%z!j8Yp-4v76FDWX^eE;<6W#2voUNk(HH#s%9`ofAA63;iD
z+_ds;c}Pjm=i4bauk?WoRRS4W-P6^#F=cXU@b24QCkyowL9E9sUPw#_Ye)+z>CwKG
za`Q@_p_bLL(A-TqS>|%0vX31=u25XOC|L%iRe3+(tJ9~K9s73W;J4?g!O8z$Kd5+`
z8vMB8`oW(kPcJ+6b1ifIiPOv4?5{Tdf0`N`9i?f>_ix^G-`>|t`PQx9lam!QzqG*i
zd1~<Ex&Zd?{d;n<?$p0}P&akDZ*Eld>A!~hv!%XYxB67HvQU0~^Y6MiGt*nWRvGD^
zHm$tqchzzCwCTRRWrcZ;4vH1`MBkJjd}lIm@1b)E|6`6WTl+l?<WgOC*W(rapz!(y
za&tP!lh;<fkeI&>6x5j^B|YmwLA|W==z9pgPyS}g%`3|~x5tA*Ygy-p{kKwXW-M2I
z8uVf1o}8@x@}jbjV?e<-zqhNeP!VLuyQz~N$DC0+y{zmJDEPLY+_bW`E~KP~IcHN&
zmO3bub3l>t-|J*yUKWV;d&LWh?VC?-TDi9@q@?FCNSVH@sO;l=Aam3EyZSx~fXtl?
za{C<LX}-OGtXAmj&z8C!%zk_Rbl={;>zf}>o$lNFIF|jny8djb=iB-IDC*Ca`WM3f
zZ~AoK+{&d-EPhnSnVGJeZnb0Ir%fvhYp)&rI%m3X?{7!h-Jqz1g>vujSlK)BpEs>6
ztY6#wyD-j7>ib8l9q*r~1~0z5qPa{>f40=y5ZNu+l{c?sh`)CD@^E|lDdS%UB%5R<
z-?W?UGw%1{R@E>H?N<t(457XvDE~OUt(E;gg}i@_Z}^|>KY#PsX7he;*56INELWzV
zbv<ZuPysY<zESdn!e>|M%xUvkb{~%D^kvxkKl(GTmFA7y<C|MS<LXt%i#GV*@Cey!
zcO&vL)8@8<TYRqrSNMQ3UXrYV&U5o;O<f<)J(oBy2F|(Jg`S@oU;Cdn1{tn+<X3=;
z7g%XrLf!=L=bd0RNx3ubyq=O!zxieHB#@+2ho*sc<sA1{C6V($Rvr;Z_;e-r;Jf)t
zw;!@Tb5umQhzm5v&m&lM|AO`XGsd$HnS8A0`PQExe&Q3@nl`5SHy=hf)hyl+diL}C
zXIgKiE8D;Z9r&?e=ewETY|ksb%Ub;M8rVZ^XI7snJ-?-YzxcuH(^r1k|A_}=ct_*M
zoZ`EeUnzWvo>5=_{u;~xi(@f9wtD9?4*fS?KV?7I`OeFP?_Ty?3TE3rd5~~>_5QVr
zhN>W!Tx96f1e?a@;lpWYy-w;)+_US_mnXe3pSSmd!SlOjpFdcy*NHFE3FlC(>Nzq+
z;BIrquPHybPLnR4H>Wb>OT59%JGz^<EIjmF)-HSUan|ba4cvWQWye&Ta(T<{*(`R|
zOaCXG4hr1Q{UF^%(-PEKf5&c6Zho8k&#_R+FyC<hzqoxtw=D1dIsDl9`Kg0-!f&o5
z@SBO77Q9S2Z}k6+==}>;?o;oHw$Gl=e$F%8=FT7UZzqjg1U~amE}IwgSJY|C`P?(o
z7u}yoKD*zkr1klF!gIe%{o5aQYwqLzRsZZh+uopQ;tyvOzUe!6>9$*vSI5zZ39ldI
z+9|qhH#n`=`y#Q@?daX+w_IhKpAAD}p4dG2v*ee3EdR&S-3j`9-<Bnu=iWBw$zjX0
z*BAeO6LU~5d+~W!LBZX*8^qbF+kdv~b!mB}Wbe}Z!|0IonRK4C-#dQV*y)B8I)Aph
zFY)H(v()R4_?{hq+F9v#XlBz~i8ps6t$u$!kYIhb{&U1^nfkKI&yO9h-`&*PCwW`^
z^t-^RmCJu_+p{9dub(~8=-R{b6Z;(}9j^=ecg|C)V2-@ho4{xL{b&BLHBidaQC4de
z*z9gw<a9>yy7B%3LDrtxk-fWTaIdsy{q6nrfA(sdUEKCLapJM@yPN)Qh^li8W&5|*
zYj^&J^&$WN-`mqzU%l1mO83{Fr$0SE(;D@}?pbV({n}Htx;Ny{xSqW~sp!g2BkA>%
z^{lteU9Y|7nBV8G-_BpU{%F&)-+#X+%;zj?`nm1z&13tY9N4&|u0^17KF7Dh&wqcp
zZs4BTANpA0{~j&xy*l&n&sqB{@bLSY?{Al{F7jXf$ue!pW?lBM%!Ubz?$pot&sX|y
zmDgguY2RZsbz5er3I#iz(Ohq`zrxT(R8aNav)vw18V@UGzj>zYb>vsS=cSH2d)9fW
zA7TykxtJlJF88+Sb=XUnRY4uwCitmvcN|erxF`Q+&BlNS7oR;1GHiWeb@tfebeCQM
z-H!IFY$eJq0=*oYz81A5+&7S~FMF#pciravncc^3^okw5(x3C^>;dbrmzmqMW6k9!
z>bfW<8M;jr@c(Ed6SOqe;>P)=+S@e+mZb{!?!`QHzj9XH#eTvJp`<>`8#N0md-7a<
zPpF(8b?T#;-m;D)fm+2~lbRnWUUDkoTr|VUWsa>FSIpTx)tT>Ac;BVR{foM4bT@!w
zN%y)b1p>DBHx?QmJy5l4eUV;rochI$nFeuPZ|`yJdFruu<$|6g91kvDEfNX-dE7@Z
z<;`L5$y=-pTRV~pYP2*M6TkjV$oIaKmsEEqPVmj^XUACsXDS)0LL8=Wq4&s)h1+M9
zTN`p%i-sJ(y<<jFgPHjME&Rnn{IR|!UjlvhrmQ)_aZhc})XJ2$LWhnmAwCy#-9lFP
zUBBVsGxvOr?JkD$D-yq5e`YHD>-e+eZg=*<f1hWUGGANSe&R~?>ZcmZ_~JATn>r*e
z9`cwwV^O!r#_8|H-O3fFed76S)!}tCBcbca6oJ1Bgbj~MyLexXS9JL;lkl}STgB{G
z;RFkr{KrYJtit3|I|V)}%|G}&(C6u)?rfW`b@DU1`<}B^_w{zK)SuaZa#i#F&ZMmF
zpH;ih9_PJ!GkEo;(hd99?7wDV^la9ztgrT8H~Pt$cK@-s;lBF+<HFwGQ#Z*Ox{AEJ
zZno3^;-`+7o)3jJ{HH!SBzbkzUFe+gYDtjd5w8E)_fGgXzcXg>y46~~r?-1$_w4xP
z=Y1{(z0gXL5iE5zoa%q{fI{gaSC=C}@6sMGdd&Rn;;LQS&kAS-`ds{a;)d3-`l$k}
zXM#WVu<SY|V0S$9G`~@Nx}}&$!XBa9$E2$-g!m}V=-9QEo$Y9_`ow*co98A?=3gNc
z?DVDCuf$({Dwkqd2lKRILE)WEm36mN%l7#4Ii$<*C6`_*3^?#0%Y`NX)j<=(ua%#_
z7I*k9UDW(FB5gq#m!Qfby_YjIU1S7h*bXf>_^GytX`&JDJ-;QZd-Tp_ezrcEEY5yi
zIm&PIMd?1xLawgru+zRrMVQ~+y#FWHd6%$#?&8j{O63W9j(xh1`Zq@BB#V>^c4{jA
zI(YEfL7QNqV5bu9{A$Ca49S^&hVpZKo6pS7JM**Ug@V4U@zL2Odw9#0rT3<a-*>XM
zyAgUu*?CXkx7^~w294&-q^_M)I(+V4jhXOqrWjk1a-YU|g)A4rrUt`cpTdR{5k~VX
zO14*iPtaPnE<a8^Sk0iX^Xtn?DM~@RPUsnEMRC90{Qi&FXE((|5(?+`8O)gdc9FHN
zZ1~q#T5~mI54F5~y4I<W@or<`sm9(8%|6c5mr}82_xX>te4Hj1JMWBc>yO<te$LQ5
zb#IDju8NzajC^I?)NQ(+$w?`fe(cVXmey8ld0`3i9sk8%;WO*E-kG7*Xc(yJKFi(p
z#Q%b6eIB0I-5*@I;l8X>bCF(0b4lzn$pizdrjDnp!&5uU3>|EyB}d1JygB*ok$UQC
z-#p=bAwBifu*V9Y-+Ej>E|9yYaC-geJ`aJwg*(n(oYD7C$#Z9Khvv24oxQJ|7?>A{
zZk+Xx@3()svcifEC&fQOH`0{K_M3_)9AKGWxK6X@uYeN|OQ)t{8auC6pun!0ZL|Et
z4S1FGyv-!<Dz0)-KKG0L_LC?5E@jhlK3%O@FmeCVpS!Ma2rOygRG%4XyS{s4)M>L>
zevOuEKW)F!P?et-_ntR*)4Z<-zHP8`YSp`_k+IA%<kh!lu?=T9rye%bjr_YUXX8mt
zrALK_f7tu(-}ZBx;KIkbCphv`^&<b~&pEDko>@@;lEJa<oQiBsN2UlUJxzPoy#Dj{
zHv)Vv3nWr>TLhFGle{=4I=OTh`tVew8+_kXxuoNy#!I>G<Q**nP9X*opPsy0FW|(G
zf7UR;*6+p1C$r=Y-**W+byT)J6uiB9zY~X{u*o)e4nvhI%IX-c4^T72#32dX`dY*|
zw?otJ*v^o@qM%lbgwfoNm3~@a_6x<iFXkcGXN*;`v}91t5()+xBEVbWuwgRD6^bna
z&d)9+Sg(RKkCK{Kxch;cb&D8_b(i^xzwtVlC-r9DGvTz8DbgDcH^r*oXkGLF#QoIV
z8-JVb8r}GrTk%Hn4cEau`JC(x?KhwNJN@jkTuPnaOSjwSe;tqMS}Dz5UAbZYto%Jb
z*V=0D{b|~};4Amf-D|Y16>r2g&%O9;^5&V`ho7_mp5^;)e$I|>w-a_Z+1lJ#n)7+4
zw&7NpH=E7s9QI4Tu{{`<HrXpoE&6O9-#4w#Y)`M7eOkGnwK{IY=Nr#9<$NlS++9|(
z;X2FjlnwT`PORT2_vV$EeT?=E?dHGtj;-(CKl}aZ+h^IC?a!+9H|E&AwLg1a^ZS1J
zn?EX#?-O}rcko=^Y-jbPS9cChTeUy;z~$r8IVBhNiN7&C_|2c2^J0VTnZw8PIlqM$
z+y8dmu=tXbvC}`jG)8`Iex7f-#rBUCAFaIhUgPT5=GsHYq%ZcfR@ZFEj@Z9FcFXC?
zv%8B0BQmf2Gk?a<`#eU|Qrb!|I5%vU{|5b8*WJH72t0VsCd{d$OflsTqjc;Xm-Q0e
zhTKoyCtaJPd1La~qat<R?TQ`mtLw?VQ8wG9mT5Cz<V{kh;>Hah40<+LopZB0boj&H
z=DBv7*Hv?0%@e(~sw7^z^|PPJ{sk>FM7Nv1pMLzmom|fQk8iF`+~0lnNkf9Y;dlF8
z(|3RS{?)<x1oP4PXSTn7eo!v)uW<Dudzp;Ki@NmQp6;3YqD^}i|AgoH*;^Es?TC0J
zY~`=AneQei<8<Ts4Py2wIimg_V-@PfYa6yNN!lJi{og*hZ<{`HeALeQeB7|>=fh{u
zoK-%^9{Dlx@y*YU(n(sNMJ0b@BIgNge!_IHZ1!(u=KZ@J{^|WXr1Pg%w?pMfyTqc@
zo~c4fi$4VXN|F|TQf!*DXyuG3v-tUj%CCgQYa4cE*QLLkzWwB<S)iD{sK4vW_pJ$f
zTj$UGZx(AJE_q%o>F}MSZuxcZxXSuIzWH~_#mCSvu}1dK#K?I9vpMs>J)FL$>h84#
zZa;Uw(2o7_J1WNV#>!{=fBVh8+by5;!yx7H?`Qjt&S|QZ5Z<BYdqhN7WRqU)++xu_
z;pX=1>t;UZ{GYPuu-SzD-TN-J?qAQQpYZ3xpYQilW<7J*a$ZdOr%BVQ?}Dq?XM8XJ
zT)y~%`}w|4Z0!QIj|-%2O}97xiTyHj+uvHLJq;g4Ki~f~;pggiP9==<1G}2Kw*R};
zRcGY?u<zCTWzHIw`({<Y)ba1IJMiGn<ul8DGJY+a{3rIyT(iHmrjMMDhWqL#bi7k^
z65ajhM@YD|$;*k^vTU}S4EM~^c8Tu{mWVMve?#F@xgr0Sr|&N(G+2NAfAC9yV%qVi
zxh5KO`H!0~4NrQt?3wiEcQ(!)av$A_?f1sc+vxb@eUe<p>HDt}&UnpP@$FvZJb~E5
z5e_x|l`Cd({fz5eseH!#nQCR5fNfL1!sF#NA)7g&R+N5FOnQ~p6rgCv{BHjbh5I!*
zpRQ%CKXN7ctogGWAS()ccFwocL%7Lu&))CP6aE{_kM`;~+t6wI_{YlMX_e9og$+yJ
zC3s)G-+STYF^-Eyxj!?bzj9nujs5v@^V!!=ogX>nsBJg+?z-=9gjKAQ`gPCDrAuFP
z8hYRQba~q=!A0J?K3x7hi~pak?v2#VHi~A=vCr!2N~Z0L+qJ{5OItVAza#R}wE5dY
z`vqbzwazzUS1jG9eqJDHvc-iv>h;2J-bCK3+L5$j?bZ9&H1_`Rx-OhFZOhZ;xmT54
zrk6ZiKHqHp=WUg30<%vPKC8Pq+uYk*f8V9AEi+VMj?Df2vhLa4>Y5wT^8<uxAMrig
z_iw39@@90Sw!B^b-Dv&$Qjk$R^9_Hk+*xlnw|teT7p8Hq&%U-cI)8(qCo__(Y@Uwo
z^O9#zf9RYOc)<J9`ONWWI<xKeUS?fy^8I}KQ_0zmFOP=rnkTS%iLQO_YB|MSOLFad
zi+{y{^t<q`Xzo{9f2{QEX<N<n8I9enyX)uO5KQO&)|F6iet&*Oy=nfRncAAH8*d)9
z(`P#K<^0CK9`iOzepq(wBq%PvxoW$J!-8VV%jG7&u6(urydqF|#zv{a!iP4>iOYZf
z&^ea5l~-P~-hJK;!F&VzJdRb;GwanqsvV4rURs|J@+JNa$G7P^zH9pTp6S=j|C)Qm
zo!`{{-JiQ9xAtVTzUbQaXUp2EmKT%E{(f2Yth83~yhcO+<)G`%pL)0!8Fy@3r^K^=
z%gqkWBT7vlax2}AW}oS2UjNNqAW|s!>7N6CX2hACk7zYpKl#{~tmKv%PSJI@!fc&N
zJZINQ-Je!fJTK5e#QXp3^bnb}f6?V$mny11Xnf{i`j#-;?p?=5&-14G-@hbGUwKJ#
z`BvK-^3Ao2f7#Ei_@eY|{=Gz7CC_$t>)=WJ<;M5-{XM&%_q)z)IosV`KkIBv&#!QB
zQG7LRb*f$Gr_|3YWN(+P){?zZ@1k-0=(mratGQKw7Vv)l+M)Dmw!W#onBXq+pDbdp
z_ur@z>dc(^eWv{9+0P|cUwBq}X+~Ubk3ew7Y5&`++&lC#FZ&A`U2#>u!`8v{YIXJJ
zu8u4EO>ZN0-wVI_f5!FL$AkB>^*SUDPwl&J<-B40jO*PIRpA@v&+PYinxSA<7J9m-
zuI0bvY}S&!f3MY)edlNU-BTIza<!RW)Qf4#@2j8fZ;rJ;>7Tgg%K9_A=L)p#G&ue6
zvrUQPr?bx6-UfzRDfRzNtp1U^?LGgq+q1itvd_xrpSWR1ZilS{kMPGwJNDhRIppy#
z`fUI0fR4JYy58}<pg=ddJ41c8wSShyTkW$~fBkfKJt|W1DuTaef5-Cm=UJ_f*7V4j
z>*q~N_c`UdgR#I^n#uRC$)zoFd7FOx2>#U5R*?MXyiFn79Mv0#n`=+G*_Af^JiB)t
z<H7UWo^&oMo%Jh|E4Jv$*B+b20>}6Q?+b7KQhW9&%l-e|H8*0O2vl}n=yjg%Cv|sM
zf92el40(3-pL2^R3e_6Ef0n$4x9NHGvqP#)2Y$`2+@kEX`$+zWG-bgB+AP+!ljb{r
zVSjkos$+in?0D_Vb^@{2&mK>xy_7d$Nuj^p$(S`8`vs1Q{4V^MAZ{Yw<o)c?=f`Gt
z4{B#u{*2r6_nJ_L`?t>Ijd8d3_(@$ZNiHzB)^GkO<05C_jNX>FV!V3azSfkK<bUSw
zO+F=jj_-5#ck9qwAI<C}?@vp(zomEOAG^y}{FQxQcQ#kv{-<)B|E%=9$R>+^|D8TF
z+WjAP+jvfPOP+r3!1?g0)fto2b}d{vf0@m_J?j>4{-QQV;>~U|yNDU=wsp^ot{P2L
zpY~c#=*27L_gPmz9dG)3Lrqz?hUfc5vsYqgj$WysalQDpitZ<`&wDJ`)VnlY?pGzo
zx@auwD_vZB*KOiP54~j((>LjRUsC$`)|81);Hb#w#-%c`OO4;@wzp?(%hr&|-M9Y7
z#j4EDR*{W64W_NyFTL>PF&D$B-y4eq1?Ht3yKB$7`%lHT6$*d$${C7=TL1m0X%qQ3
zeCqcOmA?zLa-%1ioj3V<+2e9mo654J$KM-l4!a%Klr=siA$mz=*{<a)=P%n58Q#*t
zQ5t9M`8ctkBmK#_XQ}^g*VqJPtzY@+(VlR_Pn*Sd&Zz3FwB`6aC-+%p;UA%-S68Oq
z>22n)KC{Pq(r>fazL)==Cw1L_`hB}vreIahiR2s+7em)8`l{R3`)@dze4?4RQuY3p
z*UGAOQJc=qni3YZD=uZ$jIIwUj&`+sOADIgwrzF)V9fB*;d5hS>0N&5R;voFU**#L
zUpL9K&*l%BbL{4s?X4F~@2aeJw0l~yX~pZV==C#xt@#<zXw~6SX#D7<UeSb~h4-f|
zSt{Tax9#e8;T6BF%qr_1e^l=0aK8@m{xzZCA3}CVeap7amdWSVon0P$z{F7Wb@aBt
zo7)t$yqYJtGdJA-JMpJ*MT)NB)|7p74UG(WYuX<R@3v5ikq<Z9|Ih4KfQ8AdUzuLB
zOI-z+f1Kl9{q)Ay@*`fI7uUQt`L%o54cW7`Z%sbG+rIV0Q4!-^pQTIZpV+y!@?Pxf
z)FUEyyI(hQy6=3qtkC&$Zdy*big@rJz5VsGRdf#(Fn)T}^Ci$Hws2`n@t&WB=lY|w
zzC`gf3fb#Mru%wn>&EJDnl__p{mk#bm+|e-u``%hx>!--lDSEPw4S&mpXlj}?R_5I
z7ykRqFXC(dSvOtBSYv|SU+LQ!zp8rjdTY<#-}Iw+(mCN!t%i(}d2^;N{I{p@X68MP
zyN6?*9NA>zcREy3?@Gyi!)@78Ja<xV^Pe!*lF%1OSF4-gdrh|A_V3@4`%5ZY-Uzk}
zvM9WCzpb&ycYChQ@$bg<hq_-E=XLa5?L4|I<JIzs%f4Re)I4XLX>R=fSI6<zjvTqK
z57qwKo7KJP+gfGln(R=ebNcLT=`xYbACJ|~{(Te`Ds|yPVeg!r)!mcAXJ0=pV^wO9
zr_8lu_w5YpMMn*_IEvQoH|c-(ykrhX0sjNuwLe^Z4+|gvBAaISz+&BbXEy)0rM*v-
zE?n^szO_cXl|6;c^?jn<j=9ybzZeRuKi@xEey!;@=i}}o3G*Y&()=4Hbgz6q<NvW~
zD!PYiyY^JX-|d*;^rT?#TC?uod7sUHDD9l_XxF3flKj&)&##ob&42lb$nR4()>|y7
zKQZNp=JD+8uiH(%%vijxcdv2xDGXl16R7&&<z(sC=4WMVPww%aU{|bnu735Yi}LkN
zmCelwi3`m99pVkeO$=9U5;Q!@dWg02fx`uZ1ru_uZ^qn_&DvMXp?T?Y-qz=#nI$`8
zPyNZACLWU6!`Si0L|oow(vAroUb3M}FIrsUD7qYJmd~;C#83PBApeE+^SZmbr~X~U
z_`GRN^J91Ab7dEOH|PBOvOUGFtv7eU!Zb-nhr`Ezb_lj=EUU|{ixHgJ;n88f!h9zG
z1C>LrcD?tzR~f}#02h3bzCML}=B3GPJly}m`<#9q(~r;UXKJT!6!Jcpm#McY(q1%p
zj(AP!^qmcHM*mk?J?E_NEbzV1@o+Z>e;xC~-t|kEcJloZI(U3%a{Y&knG?OFrsh7o
z?P-#9)8<f1LGtg9rR%1uel}mSFRSkEvb$nA*2g&v|63F@@|7E|pZ@llTw&nn*OR~A
zu9#adm-%YBX707AP5nXo{M&5iJWFog!T<38`!kmqKYnnP{;3fd^v9yV==i%MtHV!k
z;m@qyBK_n!zwUvrM@j{h&7}o{zi*ki{`$ev_WFH_o{L{k+QRX@_}%sQ{+E>a4cGra
zclDErq3HMd9C2LNf4ZM}{p_9{%dStCd4qQ;E`9BKO?g>Hho<m+fgKVBo0di_Xj-0q
zN-^qE>byI_67{VH2Ne^Qle&KGn7F)Vwb$}{CxWJady`e!UAg%1Wp}=bZ1dJ6O~3E=
zxl}S`#-~48vWj90&xNa=UfsB$%m3>|Kg0FY*)*OieEyT`DV5Z2BK3QU_l)}`7I6Yc
zMJ6_Fk)C`^O7U0b>^%*c5<3f@-M%OWu94?mJreTew#S}}%0UlqUH$ZJs?fQjsq?xt
z<s|my-;<qCRR4L?52q&|w0d9bblr7V|GBy`;Bk#x{UHH&lb)6Honp=J3$NZ{v#T`m
z^Y$Hm{+9%TmmKBLx}fpKte;Q#p-1JO%Db2E&66_n{cE!H?5>~+4VO6=K0Ey7Nk8%M
zo|{~9-6H+Z42|*vU)?4gR(!_KzlHbt>PsIAj|l7EaVeW^|6@}7f#d0ZCiRQ^*(Dy7
z&rABZ`{O@eX~AHZrV51xef+ch`y1qy{t4J!Io!;_e{W(%{nDJ8hN`@&Zqmp0iqtQ^
z-Cx$3ZR0j)S*Iq8WMG<H*dtT7DSCD{HqUonsBM4OV5xM_uDBH2q*pVar6%2wcXtZC
z-nikRru3t;ChvcpzPVz?qCjuUb2dqnn+^v}`(S#6?RDHUC+)2}=C&+OOy4@keO5=@
z#B-O+Q<RgmoRp7Gw!Ubbr*y^B`RL@NCkwP{qbD8Pxb)(x#}%K|oja1W<e!TjWw<zf
z>6s3%3CD$F#UZssm~ilRuS*Tmx-SF2<gcIh`k07f!lD_AvYU%+zUrm6^-mO6ZWTNy
z%CGT*@viF58?oF<Juav3CEaWIC~cD}Y}$3DbEW^x`)^(+%ocbVIAL?{@%2xvOsuZ_
z6q<eKy2Q_lBm2GdKoPz4?5?7DDRJh97W2JZ{KGKSV9};OPvz!!m&#cQL}psb$$j@d
z_2Gip>#n-OKxy7shX)_T_d6H3Y-E!b40gC8D&Mz%`mK{@cK#xFj{GS<X}{0QS!eI1
z7>#AO-TP15g-Xs)TgD*R{=ui8d)wt(lJ<ARcYV3c8wgAB_cG$zPVgtj{JU=$mc*je
zy^cZQh}X^H{!Q68SIjsiZ7j9%k5YG&QrF6gIbp_1f*UqRos-@o5$d7rteT|twC7o`
zC2y#dr`3r++qZ?DZiw*O-4VnQ7(8!w{J!^s^LBiyJ;$4PlNFYluar(Ud2r05N<^N6
zZCdd}-i}Ec%Rcn~+3{@KVlC;N9ldrZyuN1(e~7M``Fg@@^}PJKGj8Sn>nf9VVbzeA
zcCImbzwI%j_WkX5G91Kvm(|U>loo6G<JhLkl4{cv`*;8RF)ej<{Y%DgM)KLNJ73>?
zVk=T1;kx6(RWZxCmD7!0KinO|Zy6(=U0QqHv0Qa8&z0Vb$%o!wZQPK2`_l{i+ozb-
zUoW>+5UfkS>6U!BXvcZ|+3Wpnvt{2`F7dX%D*#E3x`wU7AFiD3oypCy)UR@0*sC8)
zD*sy_2~hLrSlu`yNym%tc9{Ayy~#@sRvo#LyejqBS{?t577>NZD{uaB3(#6tVRSQf
zb-j~<+yA7yrAb{KhutSX6;TpeKGXi?k=k^dh*P<%OXj&vUw&g!sdKP>ugThUQ^T!u
zmN@DyTeL&r)0D%f#RGi`_i1iFtk$2u<wmbSW!q0Hm&y00KD~PItnB<njkDdp{JgO@
zKz&(9nP0b;%SVaz+($y{?kYQI9^Sgo=4jiEf;Fze_P&{$^%Qq4GOycp_0O{t_j@mt
zuwN4jcAxcJIJHZmZU^Uc1GV<N)e@;)kL^DdX!*0oZwL%$S4=*1&@z78w<98d@8}qN
z#_%0Vo!vX{LHVrdwzXSjG)04d&*Yx_|D>@{g}2t-?Kfn%=!zN4w`zQM#>V8#N1K~}
zd`)JkE$etO@!uRB*&ngzz6MRZ2TJ^xw#-vYdexC-`L>Vs96P^sppW3*qxw6x6$vDL
z-}rHPG|Nf*Ge^Tk97<-cVH0H7rP9&iTYc)>?xU8+U;blAj(Jh_V8@dSUyD}tzFIFF
z+_QF(pVDGWo2Qr8?yH}-k-_Ba<tb*bg%)Xh{h7kR{C)3LCSK#kDeOVZIyL9H?R<Ic
z-EwZVN`Hg+=O0vbeV1=!aN63#oXDUcxQ-*=Vad}I%Vo^(pYT1)>JgpP^+9{$+~Ay=
zcnKfxdNU=1GWT~T@#VHjU5^jnSiaT4W20X4+0S|Y-=^Mow>j4KTI$(T$!O7s6%)Vh
zP~jEmSW^5aan=kb7wzLFZ?*0owt3{*?Du66_pD!;OEPVJk6gL;>^Udn;b!NSnp3WZ
ztUXsBR9xbnCbT<wvBF{T51xOf|KI5+VAx|Dsl7<}wbPm7j{7GzpJm@WFK2Of|1XXD
z9G#TKj^ECEU253gUnIXuIJjWW&#$w~4i>x>NL<XfD^v`cSDZ>*JLgYZb6L{YT&-(m
z=C<3%*khP`O?kPJp36RCF5bB?=%26Iza>7-{+2C=gjd(hyOp^o^N8ZT3oqsFWbX6b
z^LG2`Tf)Hw+@F^h98rul>v{TPUghHHWz*k)L*muyXRqsz{n4GCDKK$?&g;t`YVOti
z-xt61;?!C1#GCq0FduK!Q`zP9GR>7GV4mYCnQ|p=o#H<)n_90P5s3}hZ<4<zK1)up
zJO8uH<SE&+m(@vSM)?aHvhut*R_|}}{kg?Dec1*+9>2S}_j12meb&&q&hvF?pv)%^
z=K%K~u{AB*zg%jcAfvSMqp#hzm)+0ozTVa9{XFNF7QYCG(mKXJUnhnB%07Eq_@~XS
znivtg-ur^7-9Ey>R}TLRe$V&$rHw$#m7jZ-+)=s+Zb5o?tkd{-c~Ys>;+Mw+em-EE
zA#Lt*#4%?<u}A%epEi;%)){j??KDv;NjB(^bv%0Z-$b)WeJ6H7*I15qt7p}(*A#WO
zIlDQxM0bhJ9kuyu1hb~xowZcBGEs+vSGt#<my=mO_{F)zyL)~=`4w6v<(M3x!mIRq
zYWe>s7i3*b`d6^7zxr?f&xRtGB)PRr-{vQOvXkb$<`S^6_Pwcnn9Tp`u$L2eyuW<x
z@`Ve=ZqHeY6_0*fqsDT3!OHn=v#vS2bS6)B`|~15Rkz-{^RdgX-pwUSx;=r;1|O!k
z^KN6e_i=oaV=f-Kr}d<1lfBzSzwi2Ai*28{$j4}}YQC{+_0O^g{3ZK8SLgW4tz}Y5
z<tSKm@Q2=m=T{t6I;~DD?zVgH=T>9<Yy;nsf0NIwXKqm|_1!J60~$YAdUjXKi!M+v
zWw~+HRzoMBg|LAyU;A8eadX7q``XJM^s$xOYhZzWEv5VC4BmfvEq-Qh9n&%q!!Jlm
zmTb9%I>`4cb9O1`MNvdwavsu<8^Y$w;!1d%9cdhK%gg1t*Hm4?zZ~=THeXU=tC;4A
z7^=FJ7JERZ%)2s)KcR|Otwb$IaCwv9XXo2>Q9du*rmRyxqJ!c_?imI%e|aUuZEk1X
zT`}*Xz`hd)uCw0Id$aHH%_P2z+y32}cPCu>s^l|m=jj@!Q$jOzHofKMo>4IA<o_9t
zvu^G?S_2t^uw1=!zrE`D8w{~E*?s<dH^iD~*l#=;`R`~ABx!Qb>R8D>WBu&9=uNyo
zq*%Y^8TZdxYW%+Y=Ym{&-DM@tUe~)nC<a}7ZY(MN^v$#RuATBadENEK|EHYlKbvmm
zpT1h`vnRM_S^lorNcv%hbZx*cU%RA#oZ@Luo(t>L2zi{*hcpNmbk2CRT0FL6W%%rR
zA<h3S(`U%v`gS>K+bsK;pURa@?l!6=b?qy^mK*IOsJi%G=KOU6pWPuQubkoZ<;^+a
z=!W+}pDN`Ic_7_nN4;e~(jI?H_#Y>kG%tVMzvaF4NiWTw&0n8%SNzrs=dXfx^+%WQ
z`Vk|@fAC|m>qOmW`!}!uslMe$js*XyB_CHB9ufI3{I^SowR+Fb?>awS1aD8Px3M|&
zUH9h`!TtYZKEI#vOdzyw*3aWF<K^D?sWDy`Q?097@iTq%kKHe)ym9cFJ7bqa>W1k*
zQx=79|M-6Y$0ujpLn5d1ztX$;;_y*Hy;-TBOJCdF@M7tDKYgRzefPyS`*)w+6K6MR
zPs-!_9~aB*yS-iJjnuRIQM1{YrKX>)xBoAe(2_Sld-ine^A*!FCPE6Vxigdw?MkT&
zKDz(I>eShP;*|dV<ow*9rGInD{s-PaK5p`T#+X_2o?k+_N&fQOjVbr`x;r2JA0w<;
zb2xvekzB&EgQlJ9_1SM%+hp@dok^YjM{Hq6Kct~Ep<DCaPMyye^V&i`)E@Jn)h%te
zMJ_(NK4(LIc<N7%Z`!L$Zn0id{agG@zRUh*&ELbTjS2!n*#eKmzu|u7$g*pj|LT9w
zqn9^WJbHZdOTdSZZ@07kZv9!6na{ev|3~-Ny>>VF&(_apd@J}b{r`&o`wtFwl>RuE
zd9SRqH~7(V)z>9%mwQ(V2loU%N?vXw_~GlX7eAKIFlT63eEdtsM^6^P&-RgW@l(@l
z!cKiZYoBt)y<OAUoLxLLPQO=LAj!`3^zXI4ZX5Jc>JAna9xJl>dEZ7?q+-j@vr5wz
zW`DdHrvJyq7*ZLQCcTmimGXDLzxv<t4}ZTrKDyuU(*}!ouS%Zro|T_$Q`5Qp^Oflp
zO3Mrr81i+F_u8%%uJ#L^*Dh+;G57i96-(|m{IR>9wX61z-Pa}j4mKtq6rXI-TW5P@
zR>pKlFdF#!D4JaAuloG4_)@2t>#JQq7unmZcskGd@G*DYLhrCl_L<WU`>&6gdUN|J
z<xfZ6yMM^MHG6W?Ty4+ij?LN6W_wEU-@Q^@GU-_9xf!?PXW19%v;Lm^bKmC=B0HNa
ztv~Ki@tqO~3$K}K%LF8MpFO<5=Acm3Q?|3g+z+m_ltxZ3lso<GVbVSm|C#q^+D{dz
zRl3t<k^DFNCdUzxqz<-bb>6&xHm3HM_S}2>>}yftgD*=yx)f%9?tU!r`B-C>eiC0V
zyT7_gPKS;Xqs`j-tACQZc#kKadHwih*RO3MnH*M5Cl|#+0&{hz<|1eJ*}oNH&;AfI
zk6Pr>An`N4_~8B=<@iT#XMX8#o~W<5?!!`z9tOjslfSJ$Q~$j48Sk}CKfe|^eYMvw
z5fJ^o^XCs8sf9}urkn1+_Cr@{D@Ux_l8Xw41+^#tD7KnNSt-oTew6)e|H0Bq;o^@K
zn|>AwzJ(T3%Q`jX8j5$Q?I?GDbwuRis(h8x6T4?kuMOJTBft62qNzg4MTV}{hiv~p
zP|-cK^|YzT!(EOEOZoW<_9pO~=C9s1`+pe^+mf0)F*`cbru@h}X*Ks^N%FEz%}WA`
znub%P?{8TlZCv_YOPBXOlm8`)KkbuEj)*M0!L6Qoz;~Wi-t;B5iZ{e_H$}hK_`GSO
z{9JYIY01h$50xG<*<7mmow<kO^Ro-7ofeBXg59g^`#RFsM{ugo&fKZzOoa}nm#Lq=
z^1WL6`)iI~H?1#%!B?hpE#wfqzFqzF>1X_`l_ui%m-fkC{q(Z=@0mZY0{{9yq?~s?
zwR?5yF1rq)H@kxlJ$%^q>(Yv*3_nO)dxqMwpoj8~Iq$uu6&s5F6*O$}e|C)7b<ed$
zGgi(1rXU_Hxa-``?VI`X|7_I#VW^~kXsdJh-7EvQuKUjtRqneTQK+okzVC(vsFL$b
zH@Q0_GpUQMNxJBi@!8YI3Z+YvUR@29^8fBVvoQI&(}R`F0>LglMX!#8oV)jo*YD5X
z*`Y7}*G`+h>Zw&;M%=TE*>~Jr6e`OXFUaTxms#htlC<PmJi}RDuDo#KXo$%Boby3x
z?je;QZ*?VU)voG)lBCt%E_iI+hYtm_dXG(=;5OGKG`v|u{87%zV;y|!uS#_GLZjR+
zD=uE8Yh}7h=ESW>M4YuE(qt5$zS8O1(!6TZ?Nv{wSQ&0T>sIXl@xy6m=BBQB{UJVz
zHzb0$O2urqx$)%oG^xYEE|wrao|~0pD9--pE{Csq+N>E#J;8n=T85KC)R#S4u<vRQ
zw}3!!>BWUshNV`YS6n)tQ<KAS`{aF%WpSap?gzu%b8Nyl+iX&ubrCkM6>IL<tP%1;
ztK&)1t9fQGt#`T_iX9E9*{QtL&qvU=c5$V%=<BY3a!0T9u6nxmYLRU9ttVzr+b#&f
z67nsP;EtLqaf##G#iRv;OQr|EPtW<ZJ2%H)Ygt8r$96*}>%(6Q<%&19zDUyOdeH`I
z0YS%UjeLDRnz@{-IwF#<VrZ6`DcW6oyJO`Rd%Ga@W%J^e%I4QO2DiPqk{DVg6LU^N
zV_I@}imc0Ajl(lmE!CXHS0!>b^A%_yb&cAxX_HMN`e$_gb3c-i+2tDFmGr9SYLS=T
zGCqU;M@79hsyE)AJ?>+;V6N4o8CFZ8{B|o9@3`k@EHAv-<mjh$WfyxOW#5Ou&%czU
z1%rEXm%b65WWvJFzoW<3>)|iUn{L<7J3f1z*Uc(aWGK3L7mu{(!(UIM7Cu*T5|>M^
z`F#I*;|!}E)857Z-Cw$<#eRu=dY#XuyrPYTUVpZv%r(_qyQO5W$L*vp1D(vDmsu`+
zcyQ)o=5_6584dy&EtQ4;rda$i@6&zwR(qNx_arCTJ&Z*?KZWg61u`tdjo!~s7uW!*
z<3P>*+eux8ty3P}I<auJK>V(Vt$PFwziNHnwCB%6r`RR?7e=M!$e-@{dir71pE<oX
z|3zl!7<4?i^x}wN`yyFr6GPZg)b*Tjj>)ob(?X@b#x3$vF8W$p+$eIpsd-fqC=JH=
z9BpS=clk%cp7}p-xLoQqI@(?o$D`Qwl~KC4^l9$r_?i3Ha>ab#Vjh#e-Dp3bg!0iH
zZm+y=8w9#_&VM8wy?X!Jz)gw)yW*B~&Uob6@bt@Do|aRY&y+gfwQ`8diJ$)Hd8_KE
z$kz+^ed8;&mVGn4e{Rbg*R%EQ%Rf|XJXm|?wBXc5e0n8LM{VA^b3|VLvBB<Z$F~e{
zESrc1zc;SGE7i#KLnz<qW6C+{VnMcZO8-7CU8<F<ob<}~S?c52Ee-a<6P0XrtDAnl
zzkI`GTS(?d-nGYPum65xst{kk?4*AkI<Y2;!LvBxCW65~MF06$9=mn)x8Yk4nfoHV
zhLgYP<+%7bzDaek_&ABRzI@hxR_|Yp+Nyu|&ExxjV8`_1)9o8xPo8dc$>@f+@8ySE
z9AS-J@l2&<J0=tqwVW$Idw8Y5_Rj4p^HT3I2t+Q*x6UoNroAj7eQMd9pV{j;ETj~!
zsoTn`_;`Fb`OMvZzdk&)jm^YvX8h4VN$NI#vOY@|?+>1OdA9FcNP9O^Yng<Px72m_
znpvrK8a18k+Fy99hdzBF)|untW4K}NvF7ZSJiEH?>8AVA?Ao{%iYZoo4gPEumu9Cg
zaQpC9No7`>2{QTre(ydLulCR?{paJ*eW2t78XI!xKNPg7$0L2_MpsLhs!yFs=Tqy}
z##}xopVz5iSXP?-bvk>ONKMYkW6vwQ-sFCL+*)~K=|1aQFMLDfs;1`@X|SJmIjJ%K
zyN}C`|HXTcSxrAy<SGnm0ehJ*DcO5s)6drn3zK{ntnSfkk~x+eU{>2I!npPFpWFpk
z8aXe2+H$a0+PPz?KWD>*h24tJxt{HBzU=<ZpuWn@CQ!aK{`{ZLCUx#(flGB}cUzni
z^4&4t*RHl;@q&d`=k~}vy_t8>4m@Tix9r-1?LiZZ=W6M`4^sZ*X>~}=$=T_<K`htG
z_m8}PRD3C~;r#EDcu(flX=%m<v-{c`M0VX1h`4h6Yb*a{1w*^nQ_bvVOgp&k<@@5l
z$MpI+u3wn=EdS~;EBTDo``0L*$^Z|x<LLl7mAK~r*;3++vif2RqSv&2R{Oo5y9KK@
zGdrz8n&rQh77HG1kjt|B(zt)N-$_0W!|YqoRUx;&T!xOxY)P4W?#0z(9EPok<px{M
zQreHSS{yE|d_)ARb*49y>@KR*n>(pXE4!RVT0;Tu27=oe*+Eg6FRPr+Ov2_SNXd@8
zaAn#2ZIROjVgny7ulZjB?PER)zu$k>xaEc55B0jP{br8;^qw^)pZWWxu+oijX>PN?
z<|XEJQM(k~XRJRy{ZZ=+ZG<vLyO`ej_pHxrEZCAOkTeb4FMYD?+3Sz06^g&O{&miO
zwBC2Z{_e^Ly3gvWUfV2gxU|i&Bl428z3$x!+B4RFUkh5@(^&s#`HyU2(DI=ldv?y3
zn~^ROaLbIt(7WpK@}<&6zY^>X<G){uoF`Dru>Xkt$KXF(IX{(OEI%jx=KaBc0y*b(
z*0;cny(^Uy{<VDZo-;M-a#PbeE;4`c{v&Lk%lvcq3*&Nw@8`cim2Ca8Y@$$-sMnt_
z7eLDt_dYoG%=+Wrg!FTtT5kA!aQ`Fp|A_x>(1Nlvzt8^t%vkBBsM<S4VDmEYaKMzy
ztiRX%bh9-&Eu3V0Q2ry_bnS4{@27*C=W3tUURuuECtwPWo4K2F_66?eE8F-{N70P+
zk3jtq{prUyOKr~iRDMxk;HSLV_tR$29bcz5E4#RXhPsS}?<MR@RFA6mcXgW5@c&`^
z59PX>te?s+s>|iR>$f+PZ@DM7H@Be013cQ3cIakP?ZukMc0zCFJih64hUE|6|HJ$r
z-T!Rje)4{i{vDee_nZDI-%yN<3$D8TfMa3+sG+~+q}b<m-2coH?<d%Ax+whn=<&_1
zFGT+6x-I$O{AZ*0llMuRced}GaEN_-dS&hf&t_$p(_pV;UrJm0`^JmTxrVBHoYrlh
z=Q904{EtW(<<|cX+c!J>(=%)4W~*Mh^ZwS-_v;p)-R1}#k-E0zt=CJY&SWoxIOTeu
z4Sz2?IhnB3A6ozD3%f+!_0a40OrE??3X9RaF}eBgu{+oQiM%<nQRwZZvfBzS;!}RS
z6g~U=+LQe3DLE&qBDP)3esXc2<Qv;(ljZZKpES$(!TYaS{;#p&pPZX!(^Dt!KYh!*
z=K0=(`?4SJ`}!?Gd~4pzRk;F-e5)QWkIm;0mfdG_!$0>=n!;pL!#|w+v$CDeZJV(F
z^sY_vEY<V2e!jc+>xI(W3NF%Lj`^o=RXo$`G~?H@gYPmY`zILPjB308!2U;Iv~Xcr
z>z~+bZL)H2)UFkkAF-MldRxIoxa#rpy*C9ETLktx<~Gk@hj2gYf9cvAp>l4DddIy5
z+Z@j@@BMgr?ZL9p`lHXBu3zB18n4*KSNHFaP2iWddnQ3nYn(dg%Sq+9e+}oEz<g$I
z6J+D@%-yEFF7}$4OIl`_f|{CrGnD6L);0aqK60V+-@ipR&R>{JEt2HG$>GQxhv~=j
zJ`10TFa6r7IQf_LWdC=j)3kpzNXZ!TgX1vCuOqhR;+3oFZ9ebbpOMo4)ruk|2a)>n
z{qm~)_9lC8UN^t;prdjDDB(v=)_per(1Z0iWG?CTXXvb6?awpOs{S~i_o<Gv%O|s`
z7i>)6U;EE~apmvDnu#u&Ag7+2RT8xFh5%@TGRQqbfB))Vy`K^9!`<n-yn7p?;@^u2
z{cES4&;2KTK2}rl$T6@Vt}U7Db+qlo?jMU*LaeY~`>T5Ik~ueGc4bJvlYQfU@LZJc
z=W_QAyVp$-NJ^ga<E7Z^1@7xooL#DYUVl2^xAIe~1lQ`H*5BSuos_gWb9ZinkBpO-
zzI2CK=57D(muK5_G%oLg7~c8fy`j+5*=ir(@aJyl{cd3Y{{OTkb8r4>K38+b@>55`
zb^fxM3Hs*$osLJJv6tE>_v3&?={)5d^(@u#8@{veUcYmOeg1~!Gp_%>e=zUK>fVjz
z8<Lx9Z|u1f+#H*-rsXCtXx;J6GrM&)Pai&zyZX=41bvhE|6dQT3tGB7=Gva_$wu-#
z-zq-8^;XQ<ZhGTt(_jA$<}AAx|FpZdmH&$8J=M=WjhD8~7rQ*0H!|SWE}Nj>@c9x#
z**~q9dD{GTyq(KD|I{z1X<_DRuckkH^r-)!UAo>9zqP0Cs%o0|9zJ`zPUD7u(_YOR
z=Bu`(v;Gd-@PF;4`dqyS>%Xoym36t(-~3nahII2@wHxP~{vL{1;W{sG-@o+c+Uqd}
ze-HdA7YKiIyzp65@5ja~+t2U6R`O2f&H88ikLK@LtM&bF!gP;QiVODT?Bl)S__v0w
z_2<o3N;^3&*t1xt&oc}FH8-j@D{`L-eC97GAujj4X}@7_jbhuOsDFJQx9FSAH@nTY
z$eg29JpJY6BO;kEEmr+KbM&zK<Sm&lhOG>WR;(@uX4Ey8u!Oc`@MpSAP~y;S3D(Lu
zC;w(ig8HqD7pzHLrGh4VrgFHff7C7debuE0??P`euX@TolUpus*8>Tcj<|{cUaIgK
zif;eN^X&E^Glee#$u1X_(mB6L7Z+~R*z$6D?~Ai-9CNuh2AsJ6+jsVJ^|JSN2Hx}4
zmUYNItvO|rwOK6CXK4~!hrq<TgF1iDKMPb;_~bG(+5M*Wo%=TL4`f+(t!xhWvT!+g
zqrGI0cG84R{;Q@X$C}4$8a}doR#|f0Q6yMAPV?afTTiRI(*+mBE|HzIgm<pl-z|dC
zXPsC55uNRwKgX#<Qlb96dG5UV(KD`>Zr7=uHRG4VCYeMZ!MkM#cC3CoQ#SIctKroC
z)<x3q-bYIPF}q>C>VGDC<aE80vNJE{dfXEXy{WNm+N!4<u`}aOU+RbmoVGE_p{4KX
zXV8gD;cAuTE2aqsyUZ@~%h%HroGEy${o^B{%5JC5`9<@}L+g(+XclrQJ`?>}y+8Bw
zpCcWfA{-MllU~`LEMjRCJt}fh@tSw<$54TzSBkdEIQsn6^==OFF-(|SD!E}_a@XAr
zTGB^D<X$@Y)S7yqK6+*Ks;8URPmw6h{=Db#s?;MQk5|1?xKui;a=J=v`<wg1$Gmvv
zgL;_mX9TSdCQY2M|NCsS^XJc=d7U(G<&+@9t;;K2G)#}Mm<R@M-(IM=S<3fFNKE<v
zsY1$0;tzUPKfQWHM2XArXo>Lm)&JtR+<fBwXG@B#q5BJ#Zda#&_wsMq*mtnKI(1xW
zYS+rE8A{8NO4{{~$}Dqtxbh@<nb#4K%QjbJqmM<_-l>tfUb$+^)gvM`Xa4j$ZJYUD
zycfe&{7Mg>n^}sj%1`2oOW0@kc-A*lantXH@!vn2N%dcG@d;#4o-5#6qHEO4xc}%D
zww*6xynXIowoy7dySC6qJjYO8T%^8u1*DwqyTB>d-STJe{Vlb1afx;ls`^#d_9u0{
zecWqxg!8M5^}S>FS7a+KYtZDK5vKT;_u0g%HNE@_@!md$e-FRUEy`PYddBt0a-DV;
zP@`b8TJ)Oc{3{(wR+a~yzKDRL?f0LU!+ZAKR>|yWQ<a|ob#|xjOmEHSOlEd3pPEQq
z*_&dwJ%lU&+|{kczoY)XSNt;VEc=}6XVv_UWSy*ew_JoJ+^&}~bz07q(&?L0>>mGi
zeD=BU>6)$guk_xi%AMAyaYyRS*~niP;03a)pp`fz?)1)`cR!=7yU6y@?fTP^R!XmV
z&FuKE3TT4no<K!6ipr^Hci);(8DsKT@YzL;U7C_x<~fVJk&ym3Cs?)4tEFh?uiH|p
z(n`A=pF8OYefnN^`RdQgML*SBT#V=C*45c1-V5EiqU7mv>1F29N+o(noD}Qdn_Jl@
z$-U^E|1HhA<y>oo#*_Dd-JZLHYu0-?9T#>AaJT;1d;i6sj>-k*b+=~og514u!v62l
zbL9>BzZ~=5K5O5+JK|N3MV!tsfP(S!pAWPCeG=L2wPACB#^YDd^8a_%6%=gP{iM@&
z!4dzZ)ogs{cJ2M8edNN#{oiL#a?6rBbH+FJ@*Dqy|BP=)KIUrt&;4y?O5~QquFY>x
z2wy+Af2RN8h97T*LA76>fD^~Y#9KK9i)K8kE;-$KSMkQ)rrMpS{Oz|N`k(t}o5b_j
zXZJmI8vAYQPabo9p9UVZ{=#NgQmOiRed_1LV{eUS`lv-;6_6_DDs$Lid^0C=^TE!f
ztXt)eew3VE-!wOC^89qcx0hUeK4wbqi>!EVlPnC52D7=+v+Fs3FE^9CHT7hQ(am2y
z{SQ7R#vEIF#;IV-%?E2#mpwTAEcH=)^XC&+oSSToZnU1UciE$T<9GAj`>&K$+u5qS
zD=qtO-(6d0_Q9L|xBlm%ns?74X8&oEyU%w=?v3-ar*WBf%e0@~dUiK`&1v-*yNtML
zCry7|DCE2UyLw*b%*SWy%hzudl79Z|;JuVM_2X5udw+BN{%QL;FLc}2b)IrgFEgay
zD&H>KsAB)={z1F>>Um0_@H)3_BY1a?@sDLcIqPQn+BKd`F`AikCr;u`>9bhv|2}Vy
zBy8WBm$~_1WdDq$9+ANQAB~S{&WPOqlDxlO;LYoUaiVXmpMBk@pviuI{Uv+ZH<1VL
zh3hJtyio1@5NlmGnV+?I&ChAFr?-SYUv)}B<Hy<kGrBiVXR%hjalctMbZX<;XZsr^
z6hD2w_Nx9;+3flT%R|Q}|4&$a|E}Ph>(89hy@T?U4W9Mf|25~Q-G1{MnFsGBDhpcr
z`o!AZ&_BcaJNwM{i5iNxOKz_6X{miyt~Y&Yz8Kh7vkD$fTfIN`K=}GX4>OU+M{Lz@
z{L55SY!Q%dI&tvfGv3-g`-Q%~U-f(aS+;ajLsymOGoSs;_rCP{h)C+ZJY~n04^9z^
ze2)qhteh<ug=?0~oK)AJHPf6!P~iTCj~-Xvo;hv1FX~=uT>UTGgPUj7^YcF}{iv0@
z`e#+of2B#Hiig~+9v^NftxwbAxm@&{_4m@sExC&o&i&{WPkniNRmnHopzCM98-70c
zbh_82jvIe(#`G+Wxwywqsi_v)=rhy4tA0cL>}%(XcBkC@C7n1FXE{&5Ht|^?%e$@W
zEVrxIxL9wwY35?+YVz#K5dr=s$sNnLSg-N(5zIWf`$5OH4RtZ%j%^$s%8m^T%7?bB
zj@mc#XBEd4kyCeX?~(2|zoF5badu&J|DMIGK_i)3F%P6dt?o`~Sdla{@3@ST*PG-w
zuVVDqoBwYxNj_h+Px{T|lSO*V^m6{qcKKX%<J#<$%<5Uw>s_bqiv*Xt*_XnW{$~3<
zu`*WU-3rk5{w?Q4&+P8s@%+-<Q1kF$;ageK3SBGT&o0+rdThob>sjWPG)t4Z)-9i-
zKI8h{rBVN;?YwZNp8a=cWeA5{);>9XH{Zu>VfqsqJR}x1t#iJS75B|eTUBP)VUK#3
ztuouEYsBuWo#Xy#UHH^k{dnGQ?z7kMFOF5)A$aoWmD02Ilf@5T;||n$^^#e&Zr9OU
zZzuj1SCyXRd_Atm!?2;B+nqzvW&4G%;>UUSTAa@IlFoHb*_yX6^GfH+J=I)AhOS>Y
z4^HXs%FthXD%U)||J|C*JD2vHxn{GtsWABcnt#iAQi~O*bg#77*7-5~=Qh`9-v?I<
zCsj_-U$1^k+uc?D$Q9i)+tp=z_KE$uR#^E+?c%ceW{Y}xJCm1mcpP=CcH&Un_1x%L
za#hD9nJG_A7xr#bH1an5%3AEWRqXr5s*lXuIm<dK!{t-utmpmJPZCcMNsnIrbkDvR
zpO4d@zFspW<J|Hy{k~Q^>ZEqg=(;<B-}{o%>1X;@M;PB6xZU`F!^w>yvt4J`-Rf8-
zy(wp(UE+~l*EL!NoJvlYeg190+<ALZ=Isea-iD&e_SfSz9~!LddACx7slvOr=%>Ti
zVwT^&8@A6be=69T7+|TSq}sJ;cj)Qp)lat`5n;X=`E2bsnQgr*q?~e<ZEo)IJJz_=
zpsj0NU|(#DhtzM8#2DkZ<#XQ~upT-YDV2Qrhi)9p3+CKknaZ<rIJ7MM>JR*pcj8b?
z<IivXd+$$N%~PHAQ`B;u&*>Z$ae5N|tXNtoG|}LDXmjl%t@4aez86`^56qem&)nao
z$y71B@7?a~&{VmcefMUcWtE6s{oPdliQn(-?xA_vp?SK7tY@lSRX%z?n_r%Idw$r|
z#mm*5%J1u~?w4A+p})hRkN@%ch-3VH=IcN6X{*+Han}8IJh(1$>BnQXPbFIX0#Z)a
z?BcxiZ?Tr}!x@w6R_oMm&baI^eKEy(#)QeGGygf{zvWas^6a2W_nrQkX$yE~ZQ-@t
zEE3$np*YK>%0$Uz59`cB75A^t=<bf_TXA;oC8uXj8vATJ6}L%L%=63Lu=P}Q|BOez
zhvz=Ko4V2OUwZV7WgapzapFf$IdWV&I`f^e;I<f^i?t`Cr#?B}A=)@`@mbcSnCbst
zpR=84(^9=_?rhK5b+<S|e)YY2@Ai;ovvB_@(0LwbrZk^@e|-LFW3J526Blia<0|{7
zQ)~LdPbo@7QR&DRYs->U-N?T>FL?Cg&Hl@r&&szJIvL4Vw!l~4-Rk11rwPxc&;JtV
zSfV<cM@L|%(%w89L*se+;bG-DE4T%ZN%y~QwUnKH_PCSb@=)25RHJKpw)+fa?W4B+
z$`rdHey?NAY`4VC8XqsOeI{uCFVBj{<MDz`!mIc4Dz*qLs?Mm8?C{$jDxDuP-6UvH
ztU!UuuRWnR*k3$KUUaGUbC2&5P>=Q{&ohg}<QbQpITVjPJFJlO?)2%%D$c!G1!5wL
ziuq3nn{zU0_%wd$u}Pi`u2-!dFMB3^*uhG%MPN3+abj_Nnm|!-)zt?bzU`8?e}3`1
z*Sqs3xEPvS*z)t(2S*o>!t=#v-=E%-du>XbsfClzLWsR;hRMl&XF*|@!*zVd{ps@4
z(?u?<W^?b*1nbW}B)(vK7-Yz=MPShkv-hVl2KzV^cP%{oy9i?`iBm^j#^-%^pre_2
zhV_r9LMH&`2_VbC#z>twF7oEyn+{&E@}hVCv^<ZORiM%VZus7hmoM>Z#`<$8x|naN
zjL!ovi~$b@E`80Z*z%$c=DIJ({P}seE&c@=Tjfyv=Q{iQQ|Pd1$?Bi%OMZe@H8cn~
zb-Y7zA!u>e-dhriEdpzyt^hYWtrovL7Nc<^Xx@whzt4YaH-A~TW5dt)a}~YMcit|&
zrc*1GZmeHFO{aF+hPBuE$~Gsk&&;1JkW^u^SAY2y=-?(~1;Edc4VOP1=~|kwyZNu}
zjl^WjyC0K2|J~_2Z{o*g3I8{IJwK-^{c~0Rob)pdwoSI$H|C$&u6S#H^WHNt63gbC
z{KLF5o6}3t#Xhqt-Vd?_5gda(%cPBceRNr^EpOa!x|?aUI(S{$KKuML#zN<k^?dHl
zPhWE=t!t(D%>Un_&&C@>n2C4iuefBtDd<vV-PF2cI<Xvv@~BQ-b5iuI`<Bn3MJh9_
z47aA}E%_((#_VyV@x#*x|4F=&sdPKK_w4tdWtGRYa;D92`l0AyKP5fnjlunOPgUM7
z4V`slioj>j%fE}f&=N$Pig>X1GpDW-=g&_5GG{e=^|GB36+s(Lt5h!MF8iKf?s+Nb
zUDo4Aiq9LL{oZ1Fe#MWJ4fAJ~w;!B&cDG`D#y;Egyk(cQ^e1(%{5<n|{mQ9Ae6iU}
z;*GuCZv-6oK2!g=&g`ab`0=G@>%Uu8x*ZKY6VJQXajVafR~4U2`L=(2>{9+v_N;Pj
z&SB-Ix%Ma9y=UeBzj?Oy5$nNqv!58dy$gKydh+ofx{5Bl(E<a!E7LUy5)J}y+BQ1y
zo;Ub^_U<#M2kg%-PFJ36<dU?a-OX0D?D1dsY4bKs|5>q!|C!{oq{g?dCk{?aP@n03
z(JpbH?S6y*yvO2QiwsM9vn=oPwMf2nX;-Rd{~a^+-_Hc~8Sdw9<R7_|^+xi|^MiHS
za-PxJx4z_k-t<T8InzOA8R5FwU-WNgNY@F!`4uYpKqf$~OfjkFG}E`g<p=BX>*lOz
zuh}Jk{g$5MkzdHEWBUwto*$ro4b#%su8U?QrDyC}`8qUN_1MnlO37nik1$?x>5zEb
zvS63f)UxTT?j&9<dQ$n!>E)itDn8~jl0W=ZR9iJ9ZcDqKdENZi^hRj7*AMCHH{!47
zulCATT6Ql-@7g7g+pf)Xh2CTz^qarrdZBl5qUYY_Y}K<Xzv%WpTY9DRvtorr!Thq^
zchYZeAFRuj%WT$IwlBTT$71~r)>TFETRvu7GyD9h-|te3z}XCVvWDhp*F`ga<!y+c
z+08udbKWKnK|v+mjorFV$$b@$DU3~R$E<txHuAmRr|LPIZKC0~*9SeN7O^sZ-l)=>
zw_bVD(-UQ%zv{>7PVZXDJ*#_0__`?)g;OeTKGoUV*FCfR>szyU^;_XNMSo*_3Ue&g
zOPfClW=*N}*eY}M%Hc3C3!TaH%6_`q<>~Ef5qNvy%f306z=asJpz2mw7BPR_Ms4=n
z_iefk%}{w(b82BWr{m_m$6h!sTCMA3F3qHUtyuf>{iyR6vMrbMKCfGQLh;$_eD6z2
zE|n5G|34(G=lv%9c~hZJ$jTW>(UEoYec!#;UbROf<r+5=KjV-4&zyQs+$x$hzgzQI
z@Wz_?Q-zczmiXVxUiDOC*&~TH98Nu7mfw0(cSP@u#9bjLlf^G<Eg|a%@BPRGt*sO9
zoVc>y|5C=5m7gu7-yc%i<YJYa=Ql4YQ$>qwnugguxi`nR{rhO?K6|}9>&4Y)7stMI
znY-ujkI>IWH~P=Go?>71bXk&CS@^#zYTr$xq-;+o?hy@KnQ3s3)A8sxw<5#R*I6?g
ze_Qf=d;X^|N3m-qbGVm9hvTWIx98mo($Q(jaDnH2XnCzaY5!UA%Kpl|{pT+U2Fahh
z6!!Q?!hF;E&%GOic;e#qqtCP+6F$Sf>GSP9C&eDVHCvRjpQr40!gIe*^O9C~uVh~J
zls)9M;1T|pfj)+QT~BwdxKLHG^XISM&m=kT8_53_IeGNT!K+1cGm>>gDqT({%$kvu
zBl+C2UeST$qMh&i)0*H4>&r3!nP&>!zk-Uj#>}KHw^<oHcT28&UaIu&UuX9z_{FK@
zg3sS{=3f84QeN>uSzMk?wQomB?P)jW`<EM^O<#ZMk;U36kI#PAi`IRf)b%#u=gF%@
zEj5=G&v1&l$UEKbj?(R>+FNSJZI&9ivV7gEa(+cZgYDf_9oIQhBiT1{lsq*Ib+y~6
zej(N)@>u>s6T{N~T7UB*t@a$YDcX9?p-S<{FGTu3;=g^?{>XFpeV3<z7h88U&)%QE
zc>8+^3BO9VMen1RDz33mn9`f1+7$6X*FNrhdd@kw#0JZ+pQP-QbX&JyTJig}*)2P^
zeaaIe*puyaP8<~p7Bg7OaJTOxugjv)`-ba{yEHo<wV89iWMg4b6Exf<y4+Wfi$h6G
zF}0S3ThL@#`GedehjYJFuT1#s|M}CNoyR529u%7_k<H%s_JD-Zw#Jo;Nk=kg%}~lZ
zzRk$2?`uO|^sB`+QXhqp(;{d@Yq{~PyfaxzT#F`Dq)#f-|Ey<zsXpNE-)>*|4%yv2
z?`Nm4VQz7rW4<cyulwwL`{w)pX5zUWdfQq{lfK93En_frzS3FJQ?+e&`Oc+DU4~Kl
zD^3Yabw2RHUTJdW=4(tBzw1f=^~<t1l%FyG(1Su2v7avc8Xj+0?A5K$@xlIgmqoXa
zaBzWY@k3X;*O%X(`rl*deSBf(|NHlLI{1c9E&byeUiML}&2{02u31ePdM<~gd%6_e
z1k}=I*5o%f>aj+~ePVk3d&?Uq)i`hFpLSadDqWpieIFg>o_3u@Uqi=1*mSK&&PJW6
zYd=4psXK3X-tzOgGc$|Rp2_~(zjdDJ``zD*zr86=J8${F@|$nR%^6?!&AFT!>GZ{Q
zcG<0GGw#jUYINp-wT)WZhY7Mgy7dn>bv*g_OuKP&x%}Ol#R+UTRkFQ%o8IklOf2Vl
z$+^fzeOA74@r<7fa*j#!u8}crUGVUKM0@QS=bn<uOSi7>2zbh*>KOT>=-io4-hzuf
z6LYm(WSJ)H5_+m^b%n+FpTwVkGbZ@$UBP_Zyf)8;iQTNKv`i_r!_-AB$!LCsLd%P`
z$oHo$W2dRQ#Ba%5p4<1bcj+o0=8jpf*H-qbD7Hkn?%JbY9yv{*_TsX++boxvPg`Xq
z>9S|rZ2pLJw>Z0k7!Jj-nR@R}BMgw_eyy4-z0c2Cf9lHy<+HDkMyz_?W~0~=k&0qc
znocfvZuK3T1ewr%-zEz<-4T27Y2O`0C~e%LyX9@66Nlojg{OZPO;6>#D4u)k<-TXa
z%PQXXn29!SI}U2~wZp8|@wLv4o`^%x^x!nzYX1LevCFE{DwF<g*z{*s?!NW%8<>92
z>ir(=79pO!_Mlm`%M{Zs*Q9#PcE7o^Z-X$`ZRNC%sf%ECdtdgu9k%3cq2RXM#L_og
zI-dMK6D}v--JGiWBd0z?Pt0xO<vaaTFK(BbZ@9jC^2XK+zuMA0qwelI88LMptMBuJ
zi@ASaongVao6A~Xt!wI{)xV8OK!Li^<n5MR*IRPa4`)5g$vX4u4riF|`Rx+~KeHRQ
zPI&aWw(f-Y2Cm=rvwr7<8Xo;5dAoUIrD^V@&sAsJwog8C<@TB?o?Nrs>6>#8@0RB?
zsGom1=@^T^$JEDJVT;69dy0R5>H-cp@5_Guru_yVtaxMRFi10h7T6VgMu3~uu+>4y
z>GR16(-Y$D#a#EE+O}>+(){CUH-0_e{Os-T7dp0`nr6Ly7p@++>kU8B)0b$wV_Oo}
zIYayMjk8J$<Nd%MxVUWYr^VkdJ$!vAvzW=UT<eYfnmU``hxRJ`{=edopSk?!jd^;1
z&Y6B!&Xvj0-h8*_{_fwR-^8D#Zt#~)u6w}onYZn{_M6OS)o)y1L@5Rx+~}et7jSml
z-ZS@~-)of(x)DF?_a<@KQig9rD!smQZYJv=`d2zf{O6Crd;3KBH?#9zu6*$M)L}uz
znq|Ft68a|Z{pL<lS#~MyT=vwM_S`DFq^0>!9=7hi^v5PH^oH`Po7>A?2i2S9?8{F~
zkF3m{{Y`zz-U8i^Kea#a72r3~fBbHye8Jz#&(EYEOBcWC<HML*{7(MNB{SjR4=)ex
zlNMgyki9AI-oBN;Kd-Sm*2%lrviw!S#-BT%|8^7F`pfzDpFRAh_IB^~YJ@eTXQ@c-
z&3Sb;{2g;F(q3I{tzEr=U!*KJalXbI>u1N`Y29`EB>ZgOd%ZVlpHCc|dU#8}*tbcE
zR`vDOGyZLg?k^Dtv$^B(-z!mE^xM}%|E|{@zQ^*s@q9G*?xQw~Chw}d7ja|uncYYA
zOzrtE3Hou$&607p3Y_TOR(Iv?jx+C9I?pfXiQi~GJ6^u#vD@L74=am2?)Run5NMN;
z-u<hhLD^+q=Gi1J!J~!o#`gWM+x}l}o#r4j@mtyFs`%g=_A|L9P9;Y#V3{nu<6F$<
z{`9)(GpA4Fj$Sl(iD6sk_jC5mF)IDS!5=C==WUdi|F?hUj9u-Ol{I>~3Y(7pE{vN#
zyPjX8f7|)C+Xwi&{byc(nDu$vlp|vH4jm$Om;c--xmOv@wvO@cjOXno8>P-v#h-h*
zNk{S5B46v=VxRmoQ8TyCu-8cvkDol_QSHBjH9evV6aHQNb8vDRW1mu|X3hK=OO+iw
z+$G)``zQw;b>`SOtzy^C`=>XXC~?@B{9nG|Z+oVvk0Q&Zx`sK@kNQhk4#c*<?1_DN
zr}K?U#g2~#DQU?{wh5ZXR}DK?N}t(X({buew%k{x+S(V>6`a}>W~90Ph*1#C?ToG6
zBi=OedtVY)qlc})0!^jkuV1&ne0_L}%Ca9ZMS-42`bv(ZKbrgO@oc{Hmp(YhJe++*
zB)0$0|LtLZ84rvts%P~5@b*z!CLw*a@Hg*&dDFD6Z7wcc6VK@!G>Zwi!M{ez&}l(z
z^o{9j{>(Ee{j6xSv+w=(pdW>21^;bX`J4Z;(0jxG4;8}tF_V<@l1BE@lqoZ+Ql_z;
zmt1DAwZwhW45yl>DjyxiBl!3WFG(iwx^xP}Z1ffsJkI>)eA|C@!>!tuJU0{P&2yjL
z@aw|@78kKY4eVJh>t_A2Qa5yU(QR+nxVudL+$D?ldh_>_X1B#C2u^)ubKR`yZ=#P1
z&!QQ-wrqGGS=-6@>iF|nEs;XOE-cC}enG$HJ-g0UDCTGq=q41rMRrk)=N2^qv5xcl
zDVdWyR?dC)boq-+q2R?m9yhmC$1vTNFYG<vDH+!F^GfP<m1P;tO9Va7JlyqXW99cU
zzpi_4g)bewmVWJbksr9|%Dt62JGm<P+NldqkM8I{p<(DMqLN-aV_x0K`B_&p7~NE~
zJ2jOOE#3Jq3x-;1HZ@5k@Cfr4?Fvb{DIqvtC2rD;E~WND{U!DmOWvEkXJc+rvh$TT
zUT(D~`$xS{@bR3ADFUsB%w1o<=sWcNqqN-QiQG#}KeO8|;u6qu3Aon&-`%iuaiz#c
zP9=4h848^%)h4!FP@BZwZ~EPO<(Yd89>SHLKDCEWJyvoNvg{YxILpQ7qN&f%?~?!S
zUpebu>9TIyOz!Y=^|xMxWOc{Pzj5rvJeB_MZEA*J6<50jAD>v)v4mq)glyVg{pYZ<
zbem<G&Yd}x0zSXHtFE2;kdWPVq(>;&@Rq||PyZyXUcr;Sjb}W43|CmI+aBoSf4OtU
zqU@gilI=w=IS$KDn&HL2*F`9pN9O#^+@&+DF5PL@a1rj9D#GcNr1hT9%jSxWKIeSy
zv&u$>tIYZ|CcIeXBe=A*Sa7CY|BLGG-nd&zrLz?UCeLtUa9g)2NJQ}B;rH85zu~=h
z_Vc6}P78wPtCh|Ay{yV#Y1zK4x^<onrkyKgSmb}|_emyu#4auoUREZWTl?a+%Ca3{
z*VF#@KNgX`+jity8fr`)yR_}Xw&SW2F?+4mqN)v#=InJeXt|=3#3guWhsLpaJ<6Rc
zFFXHIN$TQYv`f!<GOgeq$9a<ki_Vo--U=_&?tT~Cr_6Qa&5q1Yrq1F;eA&+~3M-j)
zzBt|R^2X9x3#5DW9A<?0eBAly`|s3mY%Yu^y_2{cYh7o?Hfm^GSLQrjn55M?O(iLD
z&I~WXW7m)N7z$WE*^z#vhi%poky4-QrtR;}9*?wL)qP`ekF(RR8^2JCq%#q>#BP1t
z!NR<{GQ;gw*UG>%O3NBHTHoJZ^r5<G-F*FM$<`UJK8D*m+Z8X)uQ?_;C!x;f?hFGz
zpNpX#9}>&N>wbrCR#)OuVRJH`@RR=xtEZiZ-LHL@V;YKl_+sNr_%ErlPVUlN6w$NL
z>CY0!8x0ysvO1?clDa<TT`?{_t@`7QfS>$nP;FFhGT+qWrVFp|<0B%6rv9l}@LMIo
zeqE&9Jzd8wuZy4Eo$FxT^)~0X!P^Tuypwi#=c<2Pl~^Sbne^KDb*<&{)0<jebep{`
ziT~Wdy?*Y+Wl9&m?Kl!Y+y2KRoz1Dj!I@<@^R(F$Kc6~0@7L!8O_4`La?HO!c5J`E
zu!!r7(C%}mKFg~wtGl*mHSa<JWfy(Z_wTxz4=V>MFfI}@Y}xQ+mf9lyi7NO07iqJ*
z+&}%N;#je)na}JHA4Wmx8o?}%w)UC)P3pYfNnI;=llT|9i0c~^e-iq9UQ68VM}gh#
zrn#XvzFv#juCgpH`)aa)4Nsf<5u-@O_<2l$r%mr0duKOgh@~3OkTGO;pV5Et*L0oU
zkC8uO6Y7i|I*c3U>iI32l=)2Vx}4y#B`rVh)tI?_Jo3u$b*<%dpU<1hT1phBb;>?_
zJLTK1fLnDxZ^=#fNP5PpSbOt`$Oi-WITQM3JP>6pOnPR#dWO=aLpzl$J}j4>5}qdf
zZ1curotir?+-cuvXgS5TN@T-CjR_BKK0E%;*CYQ^ZP!m{8+(4XbbHOuGu?a`7pcTn
z=WL(K9ijG+MW^54_sRRxjzY&{6qf}>UR-Bte_uk?Cu!BmA9pf#dpf8I6g^qr`h{b&
zY<Xm4Tvq-S)%klIO7v>`B3`YszEhJEU#BCuS0zq(OE$~%n~Je(*f!srUK~{IZ#=Cv
z!tLLkJ#&wxzrU0D`)PJ{Ow&8*y2AaIOYASKTAg}1)p)}%g;#sqUOsMN<e%SoHT>$b
z?A9+gt~ScQteh_N!LH<ej`Y5JYiHH2wvdR}Titzv<E7@N2@e=0cc&)A7au(GTi|J@
zK&8};XV3pGf5yE0JI70Y!~TR15AN|O*%Y4ST%^mqdj0FGPq<_HAD=nT{7~xt<nzS|
z`+iljG!_K-nCLfagv`=)7wI&UP}YxmoqYEGZ;91zmxN-v9XBwaz5Y##b^hB02e0Ln
zZ0UdPY`uK1A5#*~L-)I#KA&tm3uM`zT{pEaKR4N=@9C}MX6#dZr@U*Jp%~T#s{i(<
z$BEe+v_^!zc)DorvxB{@7rvdo{%wcMlqYAy`mH*@-8r~tb!Xmkj@U^(#<#LIJUOWC
zB$d>Yr}Jr=WSz`~rDBU?x|6m~J)P5Wb#eFaqNa@#r+oc3L-B~v!4{2MFIQ|_Q@r@?
z^!0BGE&m+acdTa5gc-bxEMtzJKC^qyi?SXA2dgskJk|*1uHqy{uVaE2r+%;d{LNkb
zjnU!ni#3^ca&-JK^3&-x6khEv{{88ZgU?bguGKlR>67c{vRxn2rSB?Qx39l^;p+6V
zgnM6IzE92Ik9De%D|v6L{Uo>aMUu_+njEp`9kKae*A^dnX5c*WsmhU8m#)@I{<s$?
zmMl75b(5A0-|77ZY*XZK7iyeL%Q24M-+#UJ^3&<lZhH7Wwmwo~aBq2L&4pD3)}KFm
z7S5S1;lHb_@Xf*q-CcY1nIHev(C@PpO_EG_qy2WWr}yOxU%#iQO<QH;#Gz=lUw?jn
z+`ScdC0im?be{SAPD%T2Qu5b!X6MpXJpxW0=iZ-IpFRD$$wJo-*UyQczgfP0@Oj;S
zb>od8Qw5zkF8-{owEuiGLp?IcwWIRT2L-W?v^gKryje{zBCM#|slR!$)9m>FPG3B4
z8NJOlOlJD^W8R%zD;{in_}b)4kR)i-{pI6*HLEITzVDTE(&13M6q9>Tub-=O@`dBy
zmy|Bd?H6|9xX537XZp>zjk;ZR8BvRUblBfKaj;GolQ*8ad};rC&(}`3<<wgQ7RmqG
zrN95uo;&78c@&@VWP8`%++#aW;FDUbVOaLz>*q3lKTZB<-&Y-Z2DC0hATFye-cC8~
z%n7?^tDkRPpM3cpm(!W<EsWp8&$eE@bRo=VZc5!9>7+IRr;gSSR-b==(llJme8XYx
zCVL^{mKnl1X7!URt!<2+=+4=e^osM@@mLYibcznkw~bHs)x?~6(O@n9&G)tc8qST9
zJ-uG9y|&Igc-=bpsK{)tWdXAVoH~wv&iwrQ)8QmHcbkiQYQCMkWB-KnqPq2&+j32H
zn%_U=YA*}uyl-Y77O$n)5}|WuSMuTTmUI2Bd-N=i|Jhfcnpn<T>GXwBY2L@GE$<yo
zvnwsOZJ2xbdZuUmD)D1HiY)@gPftGk{<u5Y?f3@GzoO6HfBoM2H^Vf#!%uR?`XayT
z>N?ZarfcSw-ThFiS+VCQXWiW`R~rPJbSCl7xZnT$V?#pOecrIX_{dd}p-HI{BHwI3
zmmT{g6MJ!+`#STwB^&%_g?HVZ-74U8W`6V8_lM1I{n>So&#2!1-gl?h+p`1jd|QyM
z@4BRH=Ih>d5mtt*%%8nB3O?JrI?{<lv8t`zXVz1b_oe?MieFhjdtY0bSRKB(b%xZj
z<o5>h@>7+9V!1RUybM|MP1oO;o>RBKt3|+R#@-Wu{{7bd?D*pIGltI})vq*qdb)>f
zZk;isI_l?P2E(gYC03r``~3YyRpITKi4KpRpV2P&Oe|lS#;w@$qH!_z?xQu!E+;$R
zv-$b*kX^<<o&)zHZpex9E4m$?VI5}ne_!SA#GlQM?=BdwhjgZO-h|#b*=nnr=EKP>
zbh#<dWyQPTvtmvh7Zvv>>n+*wJdeM!yWyQ=TG{eKz5BXvSfAN%Yt4A{?b&Rx<$J#w
zsF{4(+jr&Xv-d}<%a1jm<$juy)+!L&6=2xbr9Un0C#RxI`3L47Hrn!br_J6q%U}8u
zu;ka@>_mO@-j$-N_W5?1Z>6+vb8<JF&+eXae%Ai<<-dJ|;&$J(@%dw{xSsb9muK<{
zgK54Mg^Ax!&utNK@?kaqXTPavm4)Df<>5BdxR3eO-Z_=d`0e7Mb2@JVE*EiaSM7+h
zyJB<T(2Nx(J7jjR+8~^>?``AE<^4Ku_zaW7^bG7*rXTB(l!(}w<FskL$hWx9NA0$s
z1}%OGejN96-=TF|U+w*JxApGnZEXDp{Y(B%4?W+={!0Ap`PAh<-A;Y{lNx>bhfcZJ
zw{?g1C8U;@Y?7Z*{(R$ceW&cyAH1K1|D?aJ<5J?To|i56>a6}UAGOIR)wbu0e3N>e
ze}C`3h1Va=I24!tuc7z3dUS5G&ezx6)sxnKSW~}^`NyoEod2UvyykL|O3a$_qBAUf
z6KF-!>Ra!{%0d$NXRKzA)#cZE^W)Gq(Qh`Z=kL$SmYV<6?DP3)+p1?vS^woJHY|-z
zEa$2O9}k?w;}e+DBh0<u*5C;z=m6oIP>H-FYcB1~Ia3?|y1tejB-T;+G3N6;b1~Md
zr<lOHf+w_yoVGXo0lC`A=YYT1w$omUM^pM*50@N^?}ywU;eBj<#Lk~)vDIg^Vk~r~
zI<#&$Kl}WzWNT)yk*UqG;TLq~Yrbiiu-#zB)8o@vGld_ggo}OS`y6|EcEy~l*XNyh
zJKgI#*BWQh+G6V!C2I@TT&*pZHg%q8cjd+G+kd^UbM4P8y?@8;t@F`MKQEe_oAu-+
z-MJNGAHFNh;^?b=JKsj`3ai)#(zEr2ly$xP{W~0c%?{f6UNqkxXl!t9!>;^1-wluQ
zzMOumx-^^h+R@H^J2$S(7S=Hpsx7wOTe5aT`_<aw>adqljkyaK<>%$(+&gkF?EBf=
zkQp^+U+vpzn|pXOmvG#o{Jh0x@z0-0)D}yNx~A`){bIH>x0hMrhO4#3$+9cdB4Uhx
zJv&yW?JJX3djAfeeyq05F_7*(C2J4VWn45j-^Q5PR9*k&Y;SG+Y>RUssRbo#KcuYw
z{p=g}((KmdQFqO^2Wp>OAAQ$+dv#7l*uzT6+G1(bliji&i}LfzwPUsCfJ}ZDvnxzu
z`?~8Zwl0iIDZPK^0*~*8_LMKa$Hbg{Hyri(a`vs|(rnguJJoMaTDR`pVV;K{%bz7~
z{x2<6{nn&#TIkX6Ey3Q`xz_Ya*A_o!GTnIj)xMp7L4H?#yEs2@v6$iB6CjbImr;uM
z8Md613*8m=qXiT`9xtOBGm^fX&HaCA>!GWl@VS3!>mlB-?`Pk>TbkV}z4qPg+biZi
z|9|_{zMZCPUz`^=`StWz+*IE?peXtq{W7Z2Bk#-E-oM(jZ=8R%Z|BD4p~CD@cg@Ys
ze&#$900qLvOIr^ec(re5ZN}lvTuLgxo_(8lX)9Bb=C5bR`abw7yjql>=QB5t>7!(A
zadqj-D8`90wZ*?DgOu3)dS;fDoV={o{r;Vb3bR5;S~z<{dd<7px3^u|dMFyiSfMAm
zO|5g^&Wl?wA2|wg_Qj>yt*6(#n=SpD+w7y}>fg_fIW6_wu+{I&*<Riyvv+`^XyeKP
z;m2jFKUZGb>h$kV{-(g+cMbD2f2k$!o~b5Ka?yPIY_IEUIS+NkYySz7xN0CWGs|t_
z$~2Lgs}`RKG4Ap)g;JZjCThOA^rblxbPhK-W8o(rZ9DXzdET7}fv;<4tYTnbVDNPH
Kb6Mw<&;$TxtB7>~

diff --git a/docs/reference/callbacks.rst b/docs/api_reference/callbacks.rst
similarity index 100%
rename from docs/reference/callbacks.rst
rename to docs/api_reference/callbacks.rst
diff --git a/docs/reference/composability.rst b/docs/api_reference/composability.rst
similarity index 100%
rename from docs/reference/composability.rst
rename to docs/api_reference/composability.rst
diff --git a/docs/reference/example_notebooks.rst b/docs/api_reference/example_notebooks.rst
similarity index 100%
rename from docs/reference/example_notebooks.rst
rename to docs/api_reference/example_notebooks.rst
diff --git a/docs/api_reference/index.rst b/docs/api_reference/index.rst
new file mode 100644
index 0000000000..5c66df1690
--- /dev/null
+++ b/docs/api_reference/index.rst
@@ -0,0 +1,27 @@
+.. _Ref-API_Reference:
+
+API Reference
+=============
+
+API Reference for the ``llama-index`` package.
+
+.. toctree::
+   :maxdepth: 1
+
+   indices.rst
+   query.rst
+   node.rst
+   llm_predictor.rst
+   llms.rst
+   node_postprocessor.rst
+   storage.rst
+   composability.rst
+   readers.rst
+   prompts.rst
+   service_context.rst
+   callbacks.rst
+   struct_store.rst
+   response.rst
+   playground.rst
+   example_notebooks.rst
+   langchain_integrations/base.rst
diff --git a/docs/reference/indices.rst b/docs/api_reference/indices.rst
similarity index 100%
rename from docs/reference/indices.rst
rename to docs/api_reference/indices.rst
diff --git a/docs/reference/indices/empty.rst b/docs/api_reference/indices/empty.rst
similarity index 100%
rename from docs/reference/indices/empty.rst
rename to docs/api_reference/indices/empty.rst
diff --git a/docs/reference/indices/kg.rst b/docs/api_reference/indices/kg.rst
similarity index 100%
rename from docs/reference/indices/kg.rst
rename to docs/api_reference/indices/kg.rst
diff --git a/docs/reference/indices/list.rst b/docs/api_reference/indices/list.rst
similarity index 100%
rename from docs/reference/indices/list.rst
rename to docs/api_reference/indices/list.rst
diff --git a/docs/reference/indices/struct_store.rst b/docs/api_reference/indices/struct_store.rst
similarity index 100%
rename from docs/reference/indices/struct_store.rst
rename to docs/api_reference/indices/struct_store.rst
diff --git a/docs/reference/indices/table.rst b/docs/api_reference/indices/table.rst
similarity index 100%
rename from docs/reference/indices/table.rst
rename to docs/api_reference/indices/table.rst
diff --git a/docs/reference/indices/tree.rst b/docs/api_reference/indices/tree.rst
similarity index 100%
rename from docs/reference/indices/tree.rst
rename to docs/api_reference/indices/tree.rst
diff --git a/docs/reference/indices/vector_store.rst b/docs/api_reference/indices/vector_store.rst
similarity index 100%
rename from docs/reference/indices/vector_store.rst
rename to docs/api_reference/indices/vector_store.rst
diff --git a/docs/reference/langchain_integrations/base.rst b/docs/api_reference/langchain_integrations/base.rst
similarity index 100%
rename from docs/reference/langchain_integrations/base.rst
rename to docs/api_reference/langchain_integrations/base.rst
diff --git a/docs/reference/llm_predictor.rst b/docs/api_reference/llm_predictor.rst
similarity index 100%
rename from docs/reference/llm_predictor.rst
rename to docs/api_reference/llm_predictor.rst
diff --git a/docs/reference/llms.rst b/docs/api_reference/llms.rst
similarity index 100%
rename from docs/reference/llms.rst
rename to docs/api_reference/llms.rst
diff --git a/docs/reference/llms/azure_openai.rst b/docs/api_reference/llms/azure_openai.rst
similarity index 100%
rename from docs/reference/llms/azure_openai.rst
rename to docs/api_reference/llms/azure_openai.rst
diff --git a/docs/reference/llms/huggingface.rst b/docs/api_reference/llms/huggingface.rst
similarity index 100%
rename from docs/reference/llms/huggingface.rst
rename to docs/api_reference/llms/huggingface.rst
diff --git a/docs/reference/llms/langchain.rst b/docs/api_reference/llms/langchain.rst
similarity index 100%
rename from docs/reference/llms/langchain.rst
rename to docs/api_reference/llms/langchain.rst
diff --git a/docs/reference/llms/openai.rst b/docs/api_reference/llms/openai.rst
similarity index 100%
rename from docs/reference/llms/openai.rst
rename to docs/api_reference/llms/openai.rst
diff --git a/docs/reference/node.rst b/docs/api_reference/node.rst
similarity index 100%
rename from docs/reference/node.rst
rename to docs/api_reference/node.rst
diff --git a/docs/reference/node_postprocessor.rst b/docs/api_reference/node_postprocessor.rst
similarity index 100%
rename from docs/reference/node_postprocessor.rst
rename to docs/api_reference/node_postprocessor.rst
diff --git a/docs/reference/playground.rst b/docs/api_reference/playground.rst
similarity index 100%
rename from docs/reference/playground.rst
rename to docs/api_reference/playground.rst
diff --git a/docs/reference/prompts.rst b/docs/api_reference/prompts.rst
similarity index 100%
rename from docs/reference/prompts.rst
rename to docs/api_reference/prompts.rst
diff --git a/docs/reference/query.rst b/docs/api_reference/query.rst
similarity index 100%
rename from docs/reference/query.rst
rename to docs/api_reference/query.rst
diff --git a/docs/reference/query/chat_engines.rst b/docs/api_reference/query/chat_engines.rst
similarity index 100%
rename from docs/reference/query/chat_engines.rst
rename to docs/api_reference/query/chat_engines.rst
diff --git a/docs/reference/query/chat_engines/condense_question_chat_engine.rst b/docs/api_reference/query/chat_engines/condense_question_chat_engine.rst
similarity index 100%
rename from docs/reference/query/chat_engines/condense_question_chat_engine.rst
rename to docs/api_reference/query/chat_engines/condense_question_chat_engine.rst
diff --git a/docs/reference/query/chat_engines/react_chat_engine.rst b/docs/api_reference/query/chat_engines/react_chat_engine.rst
similarity index 100%
rename from docs/reference/query/chat_engines/react_chat_engine.rst
rename to docs/api_reference/query/chat_engines/react_chat_engine.rst
diff --git a/docs/reference/query/chat_engines/simple_chat_engine.rst b/docs/api_reference/query/chat_engines/simple_chat_engine.rst
similarity index 100%
rename from docs/reference/query/chat_engines/simple_chat_engine.rst
rename to docs/api_reference/query/chat_engines/simple_chat_engine.rst
diff --git a/docs/reference/query/query_bundle.rst b/docs/api_reference/query/query_bundle.rst
similarity index 100%
rename from docs/reference/query/query_bundle.rst
rename to docs/api_reference/query/query_bundle.rst
diff --git a/docs/reference/query/query_engines.rst b/docs/api_reference/query/query_engines.rst
similarity index 100%
rename from docs/reference/query/query_engines.rst
rename to docs/api_reference/query/query_engines.rst
diff --git a/docs/api_reference/query/query_engines/citation_query_engine.rst b/docs/api_reference/query/query_engines/citation_query_engine.rst
new file mode 100644
index 0000000000..9b0108fb0e
--- /dev/null
+++ b/docs/api_reference/query/query_engines/citation_query_engine.rst
@@ -0,0 +1,6 @@
+Citation Query Engine
+=======================
+
+.. automodule:: llama_index.query_engine.citation_query_engine
+   :members:
+   :inherited-members:
\ No newline at end of file
diff --git a/docs/api_reference/query/query_engines/flare_query_engine.rst b/docs/api_reference/query/query_engines/flare_query_engine.rst
new file mode 100644
index 0000000000..0121dc1a07
--- /dev/null
+++ b/docs/api_reference/query/query_engines/flare_query_engine.rst
@@ -0,0 +1,6 @@
+Flare Query Engine
+=======================
+
+.. automodule:: llama_index.query_engine.flare.base
+   :members:
+   :inherited-members:
\ No newline at end of file
diff --git a/docs/reference/query/query_engines/graph_query_engine.rst b/docs/api_reference/query/query_engines/graph_query_engine.rst
similarity index 100%
rename from docs/reference/query/query_engines/graph_query_engine.rst
rename to docs/api_reference/query/query_engines/graph_query_engine.rst
diff --git a/docs/reference/query/query_engines/multistep_query_engine.rst b/docs/api_reference/query/query_engines/multistep_query_engine.rst
similarity index 100%
rename from docs/reference/query/query_engines/multistep_query_engine.rst
rename to docs/api_reference/query/query_engines/multistep_query_engine.rst
diff --git a/docs/reference/query/query_engines/pandas_query_engine.rst b/docs/api_reference/query/query_engines/pandas_query_engine.rst
similarity index 100%
rename from docs/reference/query/query_engines/pandas_query_engine.rst
rename to docs/api_reference/query/query_engines/pandas_query_engine.rst
diff --git a/docs/reference/query/query_engines/retriever_query_engine.rst b/docs/api_reference/query/query_engines/retriever_query_engine.rst
similarity index 100%
rename from docs/reference/query/query_engines/retriever_query_engine.rst
rename to docs/api_reference/query/query_engines/retriever_query_engine.rst
diff --git a/docs/api_reference/query/query_engines/retriever_router_query_engine.rst b/docs/api_reference/query/query_engines/retriever_router_query_engine.rst
new file mode 100644
index 0000000000..34eda95685
--- /dev/null
+++ b/docs/api_reference/query/query_engines/retriever_router_query_engine.rst
@@ -0,0 +1,6 @@
+Retriever Router Query Engine
+=============================
+
+.. automodule:: llama_index.query_engine.retriever_query_engine
+   :members:
+   :inherited-members:
\ No newline at end of file
diff --git a/docs/reference/query/query_engines/router_query_engine.rst b/docs/api_reference/query/query_engines/router_query_engine.rst
similarity index 51%
rename from docs/reference/query/query_engines/router_query_engine.rst
rename to docs/api_reference/query/query_engines/router_query_engine.rst
index 76b8560286..12b78f410e 100644
--- a/docs/reference/query/query_engines/router_query_engine.rst
+++ b/docs/api_reference/query/query_engines/router_query_engine.rst
@@ -3,4 +3,5 @@ Router Query Engine
 
 .. automodule:: llama_index.query_engine.router_query_engine
    :members:
-   :inherited-members:
\ No newline at end of file
+   :inherited-members:
+   :exclude-members: acombine_responses, combine_responses, default_node_to_metadata_fn
\ No newline at end of file
diff --git a/docs/api_reference/query/query_engines/sql_join_query_engine.rst b/docs/api_reference/query/query_engines/sql_join_query_engine.rst
new file mode 100644
index 0000000000..cda670e208
--- /dev/null
+++ b/docs/api_reference/query/query_engines/sql_join_query_engine.rst
@@ -0,0 +1,6 @@
+SQL Join Query Engine
+=======================
+
+.. automodule:: llama_index.query_engine.sql_join_query_engine
+   :members:
+   :inherited-members:
\ No newline at end of file
diff --git a/docs/reference/query/query_engines/sql_query_engine.rst b/docs/api_reference/query/query_engines/sql_query_engine.rst
similarity index 100%
rename from docs/reference/query/query_engines/sql_query_engine.rst
rename to docs/api_reference/query/query_engines/sql_query_engine.rst
diff --git a/docs/reference/query/query_engines/sub_question_query_engine.rst b/docs/api_reference/query/query_engines/sub_question_query_engine.rst
similarity index 100%
rename from docs/reference/query/query_engines/sub_question_query_engine.rst
rename to docs/api_reference/query/query_engines/sub_question_query_engine.rst
diff --git a/docs/reference/query/query_engines/transform_query_engine.rst b/docs/api_reference/query/query_engines/transform_query_engine.rst
similarity index 100%
rename from docs/reference/query/query_engines/transform_query_engine.rst
rename to docs/api_reference/query/query_engines/transform_query_engine.rst
diff --git a/docs/reference/query/query_transform.rst b/docs/api_reference/query/query_transform.rst
similarity index 100%
rename from docs/reference/query/query_transform.rst
rename to docs/api_reference/query/query_transform.rst
diff --git a/docs/api_reference/query/response_synthesizer.rst b/docs/api_reference/query/response_synthesizer.rst
new file mode 100644
index 0000000000..09719fcb28
--- /dev/null
+++ b/docs/api_reference/query/response_synthesizer.rst
@@ -0,0 +1,8 @@
+.. _Ref-Response-Synthesizer:
+
+Response Synthesizer
+=====================
+
+.. automodule:: llama_index.response_synthesizers
+   :members:
+   :inherited-members:
diff --git a/docs/reference/query/retrievers.rst b/docs/api_reference/query/retrievers.rst
similarity index 100%
rename from docs/reference/query/retrievers.rst
rename to docs/api_reference/query/retrievers.rst
diff --git a/docs/reference/query/retrievers/empty.rst b/docs/api_reference/query/retrievers/empty.rst
similarity index 100%
rename from docs/reference/query/retrievers/empty.rst
rename to docs/api_reference/query/retrievers/empty.rst
diff --git a/docs/reference/query/retrievers/kg.rst b/docs/api_reference/query/retrievers/kg.rst
similarity index 100%
rename from docs/reference/query/retrievers/kg.rst
rename to docs/api_reference/query/retrievers/kg.rst
diff --git a/docs/reference/query/retrievers/list.rst b/docs/api_reference/query/retrievers/list.rst
similarity index 100%
rename from docs/reference/query/retrievers/list.rst
rename to docs/api_reference/query/retrievers/list.rst
diff --git a/docs/reference/query/retrievers/table.rst b/docs/api_reference/query/retrievers/table.rst
similarity index 100%
rename from docs/reference/query/retrievers/table.rst
rename to docs/api_reference/query/retrievers/table.rst
diff --git a/docs/reference/query/retrievers/transform.rst b/docs/api_reference/query/retrievers/transform.rst
similarity index 100%
rename from docs/reference/query/retrievers/transform.rst
rename to docs/api_reference/query/retrievers/transform.rst
diff --git a/docs/reference/query/retrievers/tree.rst b/docs/api_reference/query/retrievers/tree.rst
similarity index 100%
rename from docs/reference/query/retrievers/tree.rst
rename to docs/api_reference/query/retrievers/tree.rst
diff --git a/docs/reference/query/retrievers/vector_store.rst b/docs/api_reference/query/retrievers/vector_store.rst
similarity index 100%
rename from docs/reference/query/retrievers/vector_store.rst
rename to docs/api_reference/query/retrievers/vector_store.rst
diff --git a/docs/reference/readers.rst b/docs/api_reference/readers.rst
similarity index 100%
rename from docs/reference/readers.rst
rename to docs/api_reference/readers.rst
diff --git a/docs/reference/response.rst b/docs/api_reference/response.rst
similarity index 100%
rename from docs/reference/response.rst
rename to docs/api_reference/response.rst
diff --git a/docs/reference/service_context.rst b/docs/api_reference/service_context.rst
similarity index 90%
rename from docs/reference/service_context.rst
rename to docs/api_reference/service_context.rst
index 0de94197ef..224bf9a45d 100644
--- a/docs/reference/service_context.rst
+++ b/docs/api_reference/service_context.rst
@@ -17,9 +17,9 @@ the BaseEmbedding (for configuring the embedding model), and more.
    :caption: Service Context Classes
 
    service_context/embeddings.rst
-   service_context/llm_predictor.rst
+   service_context/node_parser.rst
    service_context/prompt_helper.rst
-   service_context/llama_logger.rst
+   llms.rst
 
 ------------
 
diff --git a/docs/reference/service_context/embeddings.rst b/docs/api_reference/service_context/embeddings.rst
similarity index 100%
rename from docs/reference/service_context/embeddings.rst
rename to docs/api_reference/service_context/embeddings.rst
diff --git a/docs/reference/node_parser.rst b/docs/api_reference/service_context/node_parser.rst
similarity index 100%
rename from docs/reference/node_parser.rst
rename to docs/api_reference/service_context/node_parser.rst
diff --git a/docs/reference/service_context/prompt_helper.rst b/docs/api_reference/service_context/prompt_helper.rst
similarity index 100%
rename from docs/reference/service_context/prompt_helper.rst
rename to docs/api_reference/service_context/prompt_helper.rst
diff --git a/docs/reference/storage.rst b/docs/api_reference/storage.rst
similarity index 100%
rename from docs/reference/storage.rst
rename to docs/api_reference/storage.rst
diff --git a/docs/reference/storage/docstore.rst b/docs/api_reference/storage/docstore.rst
similarity index 100%
rename from docs/reference/storage/docstore.rst
rename to docs/api_reference/storage/docstore.rst
diff --git a/docs/reference/storage/index_store.rst b/docs/api_reference/storage/index_store.rst
similarity index 100%
rename from docs/reference/storage/index_store.rst
rename to docs/api_reference/storage/index_store.rst
diff --git a/docs/reference/storage/indices_save_load.rst b/docs/api_reference/storage/indices_save_load.rst
similarity index 100%
rename from docs/reference/storage/indices_save_load.rst
rename to docs/api_reference/storage/indices_save_load.rst
diff --git a/docs/reference/storage/kv_store.rst b/docs/api_reference/storage/kv_store.rst
similarity index 100%
rename from docs/reference/storage/kv_store.rst
rename to docs/api_reference/storage/kv_store.rst
diff --git a/docs/reference/storage/vector_store.rst b/docs/api_reference/storage/vector_store.rst
similarity index 100%
rename from docs/reference/storage/vector_store.rst
rename to docs/api_reference/storage/vector_store.rst
diff --git a/docs/reference/struct_store.rst b/docs/api_reference/struct_store.rst
similarity index 100%
rename from docs/reference/struct_store.rst
rename to docs/api_reference/struct_store.rst
diff --git a/docs/gallery/app_showcase.md b/docs/community/app_showcase.md
similarity index 99%
rename from docs/gallery/app_showcase.md
rename to docs/community/app_showcase.md
index 538aa0fb1b..cc9d34d733 100644
--- a/docs/gallery/app_showcase.md
+++ b/docs/community/app_showcase.md
@@ -1,4 +1,4 @@
-# 😎 App Showcase
+# App Showcase
 
 Here is a sample of some of the incredible applications and tools built on top of LlamaIndex! 
 
diff --git a/docs/community/integrations.md b/docs/community/integrations.md
new file mode 100644
index 0000000000..f941750401
--- /dev/null
+++ b/docs/community/integrations.md
@@ -0,0 +1,15 @@
+# Integrations
+
+LlamaIndex has a number of community integrations, from vector stores, to prompt trackers, tracers, and more!
+
+```{toctree}
+---
+maxdepth: 1
+---
+integrations/graphsignal.md
+integrations/guidance.md
+integrations/trulens.md
+integrations/chatgpt_plugins.md
+integrations/using_with_langchain.md
+integrations/vector_stores.md
+```
\ No newline at end of file
diff --git a/docs/how_to/integrations/chatgpt_plugins.md b/docs/community/integrations/chatgpt_plugins.md
similarity index 100%
rename from docs/how_to/integrations/chatgpt_plugins.md
rename to docs/community/integrations/chatgpt_plugins.md
diff --git a/docs/how_to/integrations/graphsignal.md b/docs/community/integrations/graphsignal.md
similarity index 100%
rename from docs/how_to/integrations/graphsignal.md
rename to docs/community/integrations/graphsignal.md
diff --git a/docs/how_to/integrations/guidance.md b/docs/community/integrations/guidance.md
similarity index 100%
rename from docs/how_to/integrations/guidance.md
rename to docs/community/integrations/guidance.md
diff --git a/docs/how_to/integrations/trulens.md b/docs/community/integrations/trulens.md
similarity index 100%
rename from docs/how_to/integrations/trulens.md
rename to docs/community/integrations/trulens.md
diff --git a/docs/how_to/integrations/using_with_langchain.md b/docs/community/integrations/using_with_langchain.md
similarity index 97%
rename from docs/how_to/integrations/using_with_langchain.md
rename to docs/community/integrations/using_with_langchain.md
index 34cd13677a..ddbbfe2582 100644
--- a/docs/how_to/integrations/using_with_langchain.md
+++ b/docs/community/integrations/using_with_langchain.md
@@ -2,7 +2,7 @@
 
 LlamaIndex provides both Tool abstractions for a Langchain agent as well as a memory module.
 
-The API reference of the Tool abstractions + memory modules are [here](/reference/langchain_integrations/base.rst).
+The API reference of the Tool abstractions + memory modules are [here](/api_reference/langchain_integrations/base.rst).
 
 ### Use any data loader as a Langchain Tool
 
diff --git a/docs/how_to/integrations/vector_stores.md b/docs/community/integrations/vector_stores.md
similarity index 99%
rename from docs/how_to/integrations/vector_stores.md
rename to docs/community/integrations/vector_stores.md
index 8618207a5d..7a29b70a4f 100644
--- a/docs/how_to/integrations/vector_stores.md
+++ b/docs/community/integrations/vector_stores.md
@@ -26,7 +26,7 @@ as the storage backend for `VectorStoreIndex`.
 - MongoDB Atlas (`MongoDBAtlasVectorSearch`). [Installation/Quickstart] (https://www.mongodb.com/atlas/database).
 - Redis (`RedisVectorStore`). [Installation](https://redis.io/docs/getting-started/installation/).
 
-A detailed API reference is [found here](/reference/indices/vector_store.rst).
+A detailed API reference is [found here](/api_reference/indices/vector_store.rst).
 
 Similar to any other index within LlamaIndex (tree, keyword table, list), `VectorStoreIndex` can be constructed upon any collection
 of documents. We use the vector store within the index to store embeddings for the input text chunks.
diff --git a/docs/core_modules/data_modules/connector/modules.md b/docs/core_modules/data_modules/connector/modules.md
new file mode 100644
index 0000000000..f228d879dc
--- /dev/null
+++ b/docs/core_modules/data_modules/connector/modules.md
@@ -0,0 +1,31 @@
+# Module Guides
+
+
+```{toctree}
+---
+maxdepth: 1
+---
+../../../examples/data_connectors/PsychicDemo.ipynb
+../../../examples/data_connectors/DeepLakeReader.ipynb
+../../../examples/data_connectors/QdrantDemo.ipynb
+../../../examples/data_connectors/DiscordDemo.ipynb
+../../../examples/data_connectors/MongoDemo.ipynb
+../../../examples/data_connectors/ChromaDemo.ipynb
+../../../examples/data_connectors/MyScaleReaderDemo.ipynb
+../../../examples/data_connectors/FaissDemo.ipynb
+../../../examples/data_connectors/ObsidianReaderDemo.ipynb
+../../../examples/data_connectors/SlackDemo.ipynb
+../../../examples/data_connectors/WebPageDemo.ipynb
+../../../examples/data_connectors/PineconeDemo.ipynb
+../../../examples/data_connectors/MboxReaderDemo.ipynb
+../../../examples/data_connectors/MilvusReaderDemo.ipynb
+../../../examples/data_connectors/NotionDemo.ipynb
+../../../examples/data_connectors/GithubRepositoryReaderDemo.ipynb
+../../../examples/data_connectors/GoogleDocsDemo.ipynb
+../../../examples/data_connectors/DatabaseReaderDemo.ipynb
+../../../examples/data_connectors/TwitterDemo.ipynb
+../../../examples/data_connectors/WeaviateDemo.ipynb
+../../../examples/data_connectors/MakeDemo.ipynb
+../../../examples/data_connectors/deplot/DeplotReader.ipynb
+```
+
diff --git a/docs/how_to/connector/root.md b/docs/core_modules/data_modules/connector/root.md
similarity index 80%
rename from docs/how_to/connector/root.md
rename to docs/core_modules/data_modules/connector/root.md
index 0f7db575bc..ceed552efc 100644
--- a/docs/how_to/connector/root.md
+++ b/docs/core_modules/data_modules/connector/root.md
@@ -1,9 +1,12 @@
-# 🔌 Data Connectors (LlamaHub)
+# Data Connectors (LlamaHub)
 
 ## Concept
 A data connector (i.e. `Reader`) ingest data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
 
-Once you've ingested your data, you can build an [Index](/how_to/index/root.md) on top, ask questions using a [Query Engine](/how_to/query_engine/root.md), and have a conversation using a [Chat Engine](/how_to/chat_engine/root.md).
+```{tip}
+Once you've ingested your data, you can build an [Index](/core_modules/data_modules/index/root.md) on top, ask questions using a [Query Engine](/core_modules/query_modules/query_engine/root.md), and have a conversation using a [Chat Engine](/core_modules/query_modules/chat_engines/root.md).
+```
+
 ## LlamaHub
 Our data connectors are offered through [LlamaHub](https://llamahub.ai/) 🦙. 
 LlamaHub is an open-source repository containing data loaders that you can easily plug and play into any LlamaIndex application.
diff --git a/docs/how_to/connector/usage_pattern.md b/docs/core_modules/data_modules/connector/usage_pattern.md
similarity index 100%
rename from docs/how_to/connector/usage_pattern.md
rename to docs/core_modules/data_modules/connector/usage_pattern.md
diff --git a/docs/core_modules/data_modules/documents_and_nodes/root.md b/docs/core_modules/data_modules/documents_and_nodes/root.md
new file mode 100644
index 0000000000..d88e7c941f
--- /dev/null
+++ b/docs/core_modules/data_modules/documents_and_nodes/root.md
@@ -0,0 +1,64 @@
+# Documents / Nodes
+
+## Concept
+
+Document and Node objects are core abstractions within LlamaIndex.
+
+A **Document** is a generic container around any data source - for instance, a PDF, an API output, or retrieved data from a database. They can be constructed manually, or created automatically via our data loaders. By default, a Document stores text along with some other attributes. Some of these are listed below.
+- `metadata` - a dictionary of annotations that can be appended to the text.
+- `relationships` - a dictionary containing relationships to other Documents/Nodes.
+
+*Note*: We have beta support for allowing Documents to store images, and are actively working on improving its multimodal capabilities.
+
+A **Node** represents a "chunk" of a source Document, whether that is a text chunk, an image, or other. Similar to Documents, they contain metadata and relationship information with other nodes.
+
+Nodes are a first-class citizen in LlamaIndex. You can choose to define Nodes and all their attributes directly. You may also choose to "parse" source Documents into Nodes through our `NodeParser` classes. By default every Node derived from a Document will inherit the same metadata from that Document (e.g. a "file_name" field in the Document is propagated to every Node).
+
+
+## Usage Pattern
+
+Here are some simple snippets to get started with Documents and Nodes.
+
+#### Documents
+
+```python
+from llama_index import Document, VectorStoreIndex
+
+text_list = [text1, text2, ...]
+documents = [Document(text=t) for t in text_list]
+
+# build index
+index = VectorStoreIndex.from_documents(documents)
+
+```
+
+#### Nodes
+```python
+
+from llama_index.node_parser import SimpleNodeParser
+
+# load documents
+...
+
+# parse nodes
+parser = SimpleNodeParser()
+nodes = parser.get_nodes_from_documents(documents)
+
+# build index
+index = VectorStoreIndex(nodes)
+
+```
+
+### Document/Node Usage
+
+Take a look at our in-depth guides for more details on how to use Documents/Nodes.
+
+```{toctree}
+---
+maxdepth: 1
+---
+usage_documents.md
+usage_nodes.md
+usage_metadata_extractor.md
+```
+
diff --git a/docs/how_to/customization/custom_documents.md b/docs/core_modules/data_modules/documents_and_nodes/usage_documents.md
similarity index 86%
rename from docs/how_to/customization/custom_documents.md
rename to docs/core_modules/data_modules/documents_and_nodes/usage_documents.md
index 1ba16f4fb8..f2ad97bbf2 100644
--- a/docs/how_to/customization/custom_documents.md
+++ b/docs/core_modules/data_modules/documents_and_nodes/usage_documents.md
@@ -1,8 +1,32 @@
-# Customizing Documents
+# Defining and Customizing Documents
+
+
+## Defining Documents
+
+Documents can either be created automatically via data loaders, or constructed manually.
+
+By default, all of our [data loaders](/core_modules/data_modules/connector/root.md) (including those offered on LlamaHub) return `Document` objects through the `load_data` function.
+
+```python
+from llama_index import SimpleDirectoryReader
+
+documents = SimpleDirectoryReader('./data').load_data()
+```
+
+You can also choose to construct documents manually. LlamaIndex exposes the `Document` struct.
+
+```python
+from llama_index import Document
+
+text_list = [text1, text2, ...]
+documents = [Document(text=t) for t in text_list]
+```
+
+## Customizing Documents
 
 This section covers various ways to customize `Document` objects. Since the `Document` object is a subclass of our `TextNode` object, all these settings and details apply to the `TextNode` object class as well.
 
-## Metadata
+### Metadata
 
 Documents also offer the chance to include useful metadata. Using the `metadata` dictionary on each document, additional information can be included to help inform responses and track down sources for query responses. This information can be anything, such as filenames or categories. If you are intergrating with a vector database, keep in mind that some vector databases require that the keys must be strings, and the values must be flat (either `str`, `float`, or `int`).
 
@@ -38,7 +62,7 @@ filename_fn = lambda filename: {'file_name': filename}
 documents = SimpleDirectoryReader('./data', file_metadata=filename_fn)
 ```
 
-## Customizing the id
+### Customizing the id
 
 As detailed in the section [Document Management](../index/document_management.md), the doc `id_` is used to enable effecient refreshing of documents in the index. When using the `SimpleDirectoryReader`, you can automatically set the doc `id_` to be the full path to each document:
 
@@ -55,11 +79,11 @@ You can also set the `id_` of any `Document` or `TextNode` directly!
 document.id_ = "My new document id!"
 ```
 
-## Advanced - Metadata Customization
+### Advanced - Metadata Customization
 
 A key detail mentioned above is that by default, any metadata you set is included in the embeddings generation and LLM.
 
-### Customizing LLM Metadata Text
+#### Customizing LLM Metadata Text
 
 Typically, a document might have many metadata keys, but you might not want all of them visibile to the LLM during response synthesis. In the above examples, we may not want the LLM to read the `file_name` of our document. However, the `file_name` might include information that will help generate better embeddings. A key advantage of doing this is to bias the embeddings for retrieval without changing what the LLM ends up reading. 
 
@@ -76,7 +100,7 @@ from llama_index.schema import MetadataMode
 print(document.get_content(metadata_mode=MetadataMode.LLM))
 ```
 
-### Customizing Embedding Metadata Text
+#### Customizing Embedding Metadata Text
 
 Similar to customing the metadata visibile to the LLM, we can also customize the metadata visible to emebddings. In this case, you can specifically exclude metadata visible to the embedding model, in case you DON'T want particular text to bias the embeddings.
 
@@ -91,7 +115,7 @@ from llama_index.schema import MetadataMode
 print(document.get_content(metadata_mode=MetadataMode.EMBED))
 ```
 
-### Customizing Metadata Format
+#### Customizing Metadata Format
 
 As you know by now, metadata is injected into the actual text of each document/node when sent to the LLM or embedding model. By default, the format of this metadata is controlled by three attributes:
 
@@ -133,7 +157,7 @@ print("The Embedding model sees this: \n", document.get_content(metadata_mode=Me
 ```
 
 
-## Advanced - Automatic Metadata Extraction
+### Advanced - Automatic Metadata Extraction
 
 We have initial examples of using LLMs themselves to perform metadata extraction.
 
diff --git a/docs/core_modules/data_modules/documents_and_nodes/usage_metadata_extractor.md b/docs/core_modules/data_modules/documents_and_nodes/usage_metadata_extractor.md
new file mode 100644
index 0000000000..f1eda5260c
--- /dev/null
+++ b/docs/core_modules/data_modules/documents_and_nodes/usage_metadata_extractor.md
@@ -0,0 +1,43 @@
+# Automated Metadata Extraction for Nodes
+
+You can use LLMs to automate metadata extraction with our `MetadataExtractor` modules.
+
+Our metadata extractor modules include the following "feature extractors":
+- `SummaryExtractor` - automatically extracts a summary over a set of Nodes
+- `QuestionsAnsweredExtractor` - extracts a set of questions that each Node can answer
+- `TitleExtractor` - extracts a title over the context of each Node
+
+You can use these feature extractors within our overall `MetadataExtractor` class. Then you can plug in the `MetadataExtractor` into our node parser:
+
+```python
+from llama_index.node_parser.extractors import (
+    MetadataExtractor,
+    TitleExtractor,
+    QuestionsAnsweredExtractor
+)
+from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
+
+text_splitter = TokenTextSplitter(separator=" ", chunk_size=512, chunk_overlap=128)
+metadata_extractor = MetadataExtractor(
+    extractors=[
+        TitleExtractor(nodes=5),
+        QuestionsAnsweredExtractor(questions=3),
+    ],
+)
+
+node_parser = SimpleNodeParser(
+    text_splitter=text_splitter,
+    metadata_extractor=metadata_extractor,
+)
+# assume documents are defined -> extract nodes
+nodes = node_parser.get_nodes_from_documents(documents)
+```
+
+
+```{toctree}
+---
+caption: Metadata Extraction Guides
+maxdepth: 1
+---
+/examples/metadata_extraction/MetadataExtractionSEC.ipynb
+```
\ No newline at end of file
diff --git a/docs/core_modules/data_modules/documents_and_nodes/usage_nodes.md b/docs/core_modules/data_modules/documents_and_nodes/usage_nodes.md
new file mode 100644
index 0000000000..39154812d5
--- /dev/null
+++ b/docs/core_modules/data_modules/documents_and_nodes/usage_nodes.md
@@ -0,0 +1,35 @@
+# Defining and Customizing Nodes
+
+Nodes represent "chunks" of source Documents, whether that is a text chunk, an image, or more. They also contain metadata and relationship information
+with other nodes and index structures.
+
+Nodes are a first-class citizen in LlamaIndex. You can choose to define Nodes and all its attributes directly. You may also choose to "parse" source Documents into Nodes through our `NodeParser` classes.
+
+For instance, you can do
+
+```python
+from llama_index.node_parser import SimpleNodeParser
+
+parser = SimpleNodeParser()
+
+nodes = parser.get_nodes_from_documents(documents)
+```
+
+You can also choose to construct Node objects manually and skip the first section. For instance,
+
+```python
+from llama_index.schema import TextNode, NodeRelationship, RelatedNodeInfo
+
+node1 = TextNode(text="<text_chunk>", id_="<node_id>")
+node2 = TextNode(text="<text_chunk>", id_="<node_id>")
+# set relationships
+node1.relationships[NodeRelationship.NEXT] = RelatedNodeInfo(node_id=node2.node_id)
+node2.relationships[NodeRelationship.PREVIOUS] = RelatedNodeInfo(node_id=node1.node_id)
+nodes = [node1, node2]
+```
+
+The `RelatedNodeInfo` class can also store additional `metadata` if needed:
+
+```python
+node2.relationships[NodeRelationship.PARENT] = RelatedNodeInfo(node_id=node1.node_id, metadata={"key": "val"})
+```
\ No newline at end of file
diff --git a/docs/how_to/index/composability.md b/docs/core_modules/data_modules/index/composability.md
similarity index 94%
rename from docs/how_to/index/composability.md
rename to docs/core_modules/data_modules/index/composability.md
index 9102a69b80..0ba20fd062 100644
--- a/docs/how_to/index/composability.md
+++ b/docs/core_modules/data_modules/index/composability.md
@@ -85,7 +85,7 @@ The default query engine for each index is called under the hood (i.e. `index.as
 Below we show an example that configure the tree index retrievers to use `child_branch_factor=2` (instead of the default `child_branch_factor=1`).
 
 
-More detail on how to configure `ComposableGraphQueryEngine` can be found [here](/reference/query/query_engines/graph_query_engine.rst).
+More detail on how to configure `ComposableGraphQueryEngine` can be found [here](/api_reference/query/query_engines/graph_query_engine.rst).
 
 
 ```python
@@ -150,7 +150,7 @@ We can take a look at a code example below as well. We first build two tree indi
 caption: Examples
 maxdepth: 1
 ---
-../../examples/composable_indices/ComposableIndices-Prior.ipynb
-../../examples/composable_indices/ComposableIndices-Weaviate.ipynb
-../../examples/composable_indices/ComposableIndices.ipynb
+../../../../examples/composable_indices/ComposableIndices-Prior.ipynb
+../../../../examples/composable_indices/ComposableIndices-Weaviate.ipynb
+../../../../examples/composable_indices/ComposableIndices.ipynb
 ```
\ No newline at end of file
diff --git a/docs/how_to/index/document_management.md b/docs/core_modules/data_modules/index/document_management.md
similarity index 100%
rename from docs/how_to/index/document_management.md
rename to docs/core_modules/data_modules/index/document_management.md
diff --git a/docs/guides/primer/index_guide.md b/docs/core_modules/data_modules/index/index_guide.md
similarity index 100%
rename from docs/guides/primer/index_guide.md
rename to docs/core_modules/data_modules/index/index_guide.md
diff --git a/docs/how_to/index/index_progress_bars.ipynb b/docs/core_modules/data_modules/index/index_progress_bars.ipynb
similarity index 100%
rename from docs/how_to/index/index_progress_bars.ipynb
rename to docs/core_modules/data_modules/index/index_progress_bars.ipynb
diff --git a/docs/how_to/index/metadata_extraction.md b/docs/core_modules/data_modules/index/metadata_extraction.md
similarity index 100%
rename from docs/how_to/index/metadata_extraction.md
rename to docs/core_modules/data_modules/index/metadata_extraction.md
diff --git a/docs/how_to/index/modules.md b/docs/core_modules/data_modules/index/modules.md
similarity index 68%
rename from docs/how_to/index/modules.md
rename to docs/core_modules/data_modules/index/modules.md
index 6e50537375..4402791d30 100644
--- a/docs/how_to/index/modules.md
+++ b/docs/core_modules/data_modules/index/modules.md
@@ -5,12 +5,11 @@
 maxdepth: 1
 ---
 vector_store_guide.ipynb
-List Index </guides/primer/index_guide.md>
-Tree Index </guides/primer/index_guide.md>
-Keyword Table Index </guides/primer/index_guide.md>
+List Index <./index_guide.md>
+Tree Index <./index_guide.md>
+Keyword Table Index <./index_guide.md>
 /examples/index_structs/knowledge_graph/KnowledgeGraphDemo.ipynb
 /examples/index_structs/knowledge_graph/KnowledgeGraphIndex_vs_VectorStoreIndex_vs_CustomIndex_combined.ipynb
-/examples/index_structs/struct_indices/PandasIndexDemo.ipynb
 SQL Index </examples/index_structs/struct_indices/SQLIndexDemo.ipynb>
 /examples/index_structs/struct_indices/duckdb_sql_query.ipynb
 /examples/index_structs/doc_summary/DocSummary.ipynb
diff --git a/docs/how_to/index/root.md b/docs/core_modules/data_modules/index/root.md
similarity index 58%
rename from docs/how_to/index/root.md
rename to docs/core_modules/data_modules/index/root.md
index 0fee23ca86..e2fe340dfd 100644
--- a/docs/how_to/index/root.md
+++ b/docs/core_modules/data_modules/index/root.md
@@ -1,15 +1,24 @@
-# 🗃️ Data Index
+# Indexes
 
 ## Concept
 An `Index` is a data structure that allows us to quickly retrieve relevant context for a user query.
 For LlamaIndex, it's the core foundation for retrieval-augmented generation (RAG) use-cases.
 
 
-At a high-level, `Indices` are built from [Documents](/how_to/connector/root.md).
-They are used to build [Query Engines](/how_to/query_engine/root.md) and [Chat Engines](/how_to/chat_engine/root.md)
+At a high-level, `Indices` are built from [Documents](/core_modules/data_modules/documents_and_nodes/root.md).
+They are used to build [Query Engines](/core_modules/query_modules/query_engine/root.md) and [Chat Engines](/core_modules/query_modules/chat_engines/root.md)
 which enables question & answer and chat over your data.  
 
-Under the hood, `Indices` store data in `Node` objects (which represent chunks of the original documents), and expose an [Retriever](/how_to/retriever/root.md) interface that supports additional configuration and automation.
+Under the hood, `Indices` store data in `Node` objects (which represent chunks of the original documents), and expose a [Retriever](/core_modules/query_modules/retriever/root.md) interface that supports additional configuration and automation.
+
+For a more in-depth explanation, check out our guide below:
+```{toctree}
+---
+maxdepth: 1
+---
+index_guide.md
+```
+
 
 
 ## Usage Pattern
diff --git a/docs/how_to/index/usage_pattern.md b/docs/core_modules/data_modules/index/usage_pattern.md
similarity index 95%
rename from docs/how_to/index/usage_pattern.md
rename to docs/core_modules/data_modules/index/usage_pattern.md
index b2389c4271..66f9af93d4 100644
--- a/docs/how_to/index/usage_pattern.md
+++ b/docs/core_modules/data_modules/index/usage_pattern.md
@@ -10,7 +10,9 @@ from llama_index import VectorStoreIndex
 index = VectorStoreIndex.from_documents(docs)
 ```
 
-> Note: To learn how to load documents, see [Data Connectors](/how_to/connector/root.md)
+```{tip}
+To learn how to load documents, see [Data Connectors](/core_modules/data_modules/connector/root.md)
+```
 
 ### What is happening under the hood?
 
diff --git a/docs/how_to/index/vector_store_guide.ipynb b/docs/core_modules/data_modules/index/vector_store_guide.ipynb
similarity index 100%
rename from docs/how_to/index/vector_store_guide.ipynb
rename to docs/core_modules/data_modules/index/vector_store_guide.ipynb
diff --git a/docs/core_modules/data_modules/node_parsers/root.md b/docs/core_modules/data_modules/node_parsers/root.md
new file mode 100644
index 0000000000..d748210198
--- /dev/null
+++ b/docs/core_modules/data_modules/node_parsers/root.md
@@ -0,0 +1,24 @@
+# Node Parser
+
+## Concept
+
+Node parsers are a simple abstraction that take a list of documents, and chunk them into `Node` objects, such that each node is a specific size. When a document is broken into nodes, all of its attributes are inherited by the child nodes (i.e. `metadata`, text and metadata templates, etc.). You can read more about `Node` and `Document` properties [here](/core_modules/data_modules/documents_and_nodes/root.md).
+
+A node parser can configure the chunk size (in tokens) as well as any overlap between chunked nodes. The chunking is done by using a `TokenTextSplitter`, which defaults to a chunk size of 1024 and a default chunk overlap of 20 tokens.
+
+## Usage Pattern
+
+```python
+from llama_index.node_parser import SimpleNodeParser
+
+node_parser = SimpleNodeParser.from_defaults(chunk_size=1024, chunk_overlap=20)
+```
+
+You can find more usage details and available customization options below.
+
+```{toctree}
+---
+maxdepth: 1
+---
+usage_pattern.md
+```
diff --git a/docs/core_modules/data_modules/node_parsers/usage_pattern.md b/docs/core_modules/data_modules/node_parsers/usage_pattern.md
new file mode 100644
index 0000000000..4662aa47be
--- /dev/null
+++ b/docs/core_modules/data_modules/node_parsers/usage_pattern.md
@@ -0,0 +1,80 @@
+# Usage Pattern
+
+## Getting Started
+
+Node parsers can be used on their own:
+
+```python
+from llama_index import Document
+from llama_index.node_parser import SimpleNodeParser
+
+node_parser = SimpleNodeParser.from_defaults(chunk_size=1024, chunk_overlap=20)
+
+nodes = node_parser.get_nodes_from_documents([Document(text="long text")], show_progress=False)
+```
+
+Or set inside a `ServiceContext` to be used automatically when an index is constructed using `.from_documents()`:
+
+```python
+from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext
+from llama_index.node_parser import SimpleNodeParser
+
+documents = SimpleDirectoryReader("./data").load_data()
+
+node_parser = SimpleNodeParser.from_defaults(chunk_size=1024, chunk_overlap=20)
+service_context = ServiceContext.from_defaults(node_parser=node_parser)
+
+index = VectorStoreIndex.from_documents(documents, service_context=service_context)
+```
+
+## Customization
+
+There are several options available to customize:
+
+- `text_splitter` (defaults to `TokenTextSplitter`) - the text splitter used to split text into chunks.
+- `include_metadata` (defaults to `True`) - whether or not `Node`s should inherit the document metadata.
+- `include_prev_next_rel` (defaults to `True`) - whether or not to include previous/next relationships between chunked `Node`s
+- `metadata_extractor` (defaults to `None`) - extra processing to extract helpful metadata. See [here for details](/core_modules/data_modules/documents_and_nodes/usage_metadata_extractor.md).
+
+If you don't want to change the `text_splitter`, you can use `SimpleNodeParser.from_defaults()` to easily change the chunk size and chunk overlap. The defaults are 1024 and 20 respectively.
+
+```python
+from llama_index.node_parser import SimpleNodeParser
+
+node_parser = SimpleNodeParser.from_defaults(chunk_size=1024, chunk_overlap=20)
+```
+
+### Text Splitter Customization
+
+If you do customize the `text_splitter` from the default `TokenTextSplitter`, you can use any splitter from langchain, or optionally our `SentenceSplitter`. Each text splitter has options for the default separator, as well as options for backup separators. These are useful for languages that are sufficiently different from English.
+
+`TokenTextSplitter` configuration:
+
+```python
+from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
+
+text_splitter = TokenTextSplitter(
+  separator=" ",
+  chunk_size=1024,
+  chunk_overlap=20,
+  backup_separators=["\n"]
+)
+
+node_parser = SimpleNodeParser(text_splitter=text_splitter)
+```
+
+`SentenceSplitter` configuration:
+
+```python
+from llama_index.langchain_helpers.text_splitter import SentenceSplitter
+
+text_splitter = SentenceSplitter(
+  separator=" ",
+  chunk_size=1024,
+  chunk_overlap=20,
+  backup_separators=["\n"],
+  paragraph_separator="\n\n\n"
+)
+
+node_parser = SimpleNodeParser(text_splitter=text_splitter)
+```
diff --git a/docs/how_to/storage/customization.md b/docs/core_modules/data_modules/storage/customization.md
similarity index 96%
rename from docs/how_to/storage/customization.md
rename to docs/core_modules/data_modules/storage/customization.md
index c4d0092a6c..6b8d42088d 100644
--- a/docs/how_to/storage/customization.md
+++ b/docs/core_modules/data_modules/storage/customization.md
@@ -69,7 +69,7 @@ loaded_indicies = load_index_from_storage(storage_context, index_ids=["<index_id
 ```
 
 You can customize the underlying storage with a one-line change to instantiate different document stores, index stores, and vector stores.
-See [Document Stores](/how_to/storage/docstores.md), [Vector Stores](/how_to/storage/vector_stores.md), [Index Stores](/how_to/storage/index_stores.md) guides for more details.
+See [Document Stores](./docstores.md), [Vector Stores](./vector_stores.md), [Index Stores](./index_stores.md) guides for more details.
 
 For saving and loading a graph/composable index, see the [full guide here](../index/composability.md).
 
diff --git a/docs/how_to/storage/docstores.md b/docs/core_modules/data_modules/storage/docstores.md
similarity index 97%
rename from docs/how_to/storage/docstores.md
rename to docs/core_modules/data_modules/storage/docstores.md
index cd1e0a06ea..7e64eb18b3 100644
--- a/docs/how_to/storage/docstores.md
+++ b/docs/core_modules/data_modules/storage/docstores.md
@@ -1,7 +1,7 @@
 # Document Stores
 Document stores contain ingested document chunks, which we call `Node` objects.
 
-See the [API Reference](/reference/storage/docstore.rst) for more details.
+See the [API Reference](/api_reference/storage/docstore.rst) for more details.
 
 
 ### Simple Document Store
diff --git a/docs/how_to/storage/index_stores.md b/docs/core_modules/data_modules/storage/index_stores.md
similarity index 97%
rename from docs/how_to/storage/index_stores.md
rename to docs/core_modules/data_modules/storage/index_stores.md
index 62443ebb51..9a09614387 100644
--- a/docs/how_to/storage/index_stores.md
+++ b/docs/core_modules/data_modules/storage/index_stores.md
@@ -2,7 +2,7 @@
 
 Index stores contains lightweight index metadata (i.e. additional state information created when building an index).
 
-See the [API Reference](/reference/storage/index_store.rst) for more details.
+See the [API Reference](/api_reference/storage/index_store.rst) for more details.
 
 ### Simple Index Store
 By default, LlamaIndex uses a simple index store backed by an in-memory key-value store.
diff --git a/docs/how_to/storage/kv_stores.md b/docs/core_modules/data_modules/storage/kv_stores.md
similarity index 67%
rename from docs/how_to/storage/kv_stores.md
rename to docs/core_modules/data_modules/storage/kv_stores.md
index 9393078a37..5e03ed535b 100644
--- a/docs/how_to/storage/kv_stores.md
+++ b/docs/core_modules/data_modules/storage/kv_stores.md
@@ -1,11 +1,11 @@
 # Key-Value Stores
 
-Key-Value stores are the underlying storage abstractions that power our [Document Stores](/how_to/storage/docstores.md) and [Index Stores](/how_to/storage/index_stores.md).
+Key-Value stores are the underlying storage abstractions that power our [Document Stores](./docstores.md) and [Index Stores](./index_stores.md).
 
 We provide the following key-value stores:
 - **Simple Key-Value Store**: An in-memory KV store. The user can choose to call `persist` on this kv store to persist data to disk.
 - **MongoDB Key-Value Store**: A MongoDB KV store.
 
-See the [API Reference](/reference/storage/kv_store.rst) for more details.
+See the [API Reference](/api_reference/storage/kv_store.rst) for more details.
 
 Note: At the moment, these storage abstractions are not externally facing.
diff --git a/docs/core_modules/data_modules/storage/root.md b/docs/core_modules/data_modules/storage/root.md
new file mode 100644
index 0000000000..e49c2f9f2d
--- /dev/null
+++ b/docs/core_modules/data_modules/storage/root.md
@@ -0,0 +1,91 @@
+# Storage
+
+## Concept
+
+LlamaIndex provides a high-level interface for ingesting, indexing, and querying your external data.
+
+Under the hood, LlamaIndex also supports swappable **storage components** that allows you to customize:
+
+- **Document stores**: where ingested documents (i.e., `Node` objects) are stored,
+- **Index stores**: where index metadata are stored,
+- **Vector stores**: where embedding vectors are stored.
+
+The Document/Index stores rely on a common Key-Value store abstraction, which is also detailed below.
+
+LlamaIndex supports persisting data to any storage backend supported by [fsspec](https://filesystem-spec.readthedocs.io/en/latest/index.html). 
+We have confirmed support for the following storage backends:
+
+- Local filesystem
+- AWS S3
+- Cloudflare R2
+
+
+![](/_static/storage/storage.png)
+
+## Usage Pattern
+
+Many vector stores (except FAISS) will store both the data as well as the index (embeddings). This means that you will not need to use a separate document store or index store. This *also* means that you will not need to explicitly persist this data - this happens automatically. Usage would look something like the following to build a new index / reload an existing one.
+
+```python
+
+## build a new index
+from llama_index import VectorStoreIndex, StorageContext
+from llama_index.vector_stores import DeepLakeVectorStore
+# construct vector store and customize storage context
+vector_store = DeepLakeVectorStore(dataset_path="<dataset_path>")
+storage_context = StorageContext.from_defaults(
+    vector_store = vector_store
+)
+# Load documents and build index
+index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
+
+
+## reload an existing one
+index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
+```
+
+See our [Vector Store Module Guide](vector_stores.md) below for more details.
+
+
+Note that in general to use storage abstractions, you need to define a `StorageContext` object:
+
+```python
+from llama_index.storage.docstore import SimpleDocumentStore
+from llama_index.storage.index_store import SimpleIndexStore
+from llama_index.vector_stores import SimpleVectorStore
+from llama_index.storage import StorageContext
+
+# create storage context using default stores
+storage_context = StorageContext.from_defaults(
+    docstore=SimpleDocumentStore(),
+    vector_store=SimpleVectorStore(),
+    index_store=SimpleIndexStore(),
+)
+```
+
+More details on customization/persistence can be found in the guides below.
+
+
+```{toctree}
+---
+maxdepth: 1
+---
+customization.md
+save_load.md
+```
+
+
+
+## Modules
+
+We offer in-depth guides on the different storage components.
+
+```{toctree}
+---
+maxdepth: 1
+---
+vector_stores.md
+docstores.md
+index_stores.md
+kv_stores.md
+```
diff --git a/docs/how_to/storage/save_load.md b/docs/core_modules/data_modules/storage/save_load.md
similarity index 97%
rename from docs/how_to/storage/save_load.md
rename to docs/core_modules/data_modules/storage/save_load.md
index bc7c20a3f4..426fce42cc 100644
--- a/docs/how_to/storage/save_load.md
+++ b/docs/core_modules/data_modules/storage/save_load.md
@@ -44,7 +44,7 @@ indices = load_indices_from_storage(storage_context, index_ids=[index_id1, ...])
 graph = load_graph_from_storage(storage_context, root_id="<root_id>") # loads graph with the specified root_id
 ```
 
-Here's the full [API Reference on saving and loading](/reference/storage/indices_save_load.rst).
+Here's the full [API Reference on saving and loading](/api_reference/storage/indices_save_load.rst).
 
 ## Using a remote backend
 
diff --git a/docs/core_modules/data_modules/storage/vector_stores.md b/docs/core_modules/data_modules/storage/vector_stores.md
new file mode 100644
index 0000000000..35eb391609
--- /dev/null
+++ b/docs/core_modules/data_modules/storage/vector_stores.md
@@ -0,0 +1,65 @@
+# Vector Stores
+
+Vector stores contain embedding vectors of ingested document chunks 
+(and sometimes the document chunks as well).
+
+## Simple Vector Store
+By default, LlamaIndex uses a simple in-memory vector store that's great for quick experimentation.
+They can be persisted to (and loaded from) disk by calling `vector_store.persist()` (and `SimpleVectorStore.from_persist_path(...)` respectively).
+
+## Third-Party Vector Store Integrations
+We also integrate with a wide range of vector store implementations. 
+They mainly differ in 2 aspects:
+1. in-memory vs. hosted
+2. stores only vector embeddings vs. also stores documents
+
+### In-Memory Vector Stores
+* Faiss
+* Chroma
+
+### (Self) Hosted Vector Stores
+* Pinecone
+* Weaviate
+* Milvus/Zilliz
+* Qdrant
+* Chroma
+* Opensearch
+* DeepLake
+* MyScale
+* Tair
+* DocArray
+* MongoDB Atlas
+
+### Others
+* ChatGPTRetrievalPlugin
+
+For more details, see [Vector Store Integrations](/community/integrations/vector_stores.md).
+
+```{toctree}
+---
+caption: Examples
+maxdepth: 1
+---
+/examples/vector_stores/SimpleIndexDemo.ipynb
+/examples/vector_stores/QdrantIndexDemo.ipynb
+/examples/vector_stores/FaissIndexDemo.ipynb
+/examples/vector_stores/DeepLakeIndexDemo.ipynb
+/examples/vector_stores/MyScaleIndexDemo.ipynb
+/examples/vector_stores/MetalIndexDemo.ipynb
+/examples/vector_stores/WeaviateIndexDemo.ipynb
+/examples/vector_stores/OpensearchDemo.ipynb
+/examples/vector_stores/PineconeIndexDemo.ipynb
+/examples/vector_stores/ChromaIndexDemo.ipynb
+/examples/vector_stores/LanceDBIndexDemo.ipynb
+/examples/vector_stores/MilvusIndexDemo.ipynb
+/examples/vector_stores/RedisIndexDemo.ipynb
+/examples/vector_stores/WeaviateIndexDemo-Hybrid.ipynb
+/examples/vector_stores/PineconeIndexDemo-Hybrid.ipynb
+/examples/vector_stores/AsyncIndexCreationDemo.ipynb
+/examples/vector_stores/TairIndexDemo.ipynb
+/examples/vector_stores/SupabaseVectorIndexDemo.ipynb
+/examples/vector_stores/DocArrayHnswIndexDemo.ipynb
+/examples/vector_stores/DocArrayInMemoryIndexDemo.ipynb
+/examples/vector_stores/MongoDBAtlasVectorSearch.ipynb
+```
+
diff --git a/docs/core_modules/model_modules/embeddings/modules.md b/docs/core_modules/model_modules/embeddings/modules.md
new file mode 100644
index 0000000000..28292083a2
--- /dev/null
+++ b/docs/core_modules/model_modules/embeddings/modules.md
@@ -0,0 +1,13 @@
+# Modules
+
+We support integrations with OpenAI, Azure, and anything LangChain offers.
+
+```{toctree}
+---
+maxdepth: 1
+---
+/examples/embeddings/OpenAI.ipynb
+/examples/embeddings/Langchain.ipynb
+/examples/customization/llms/AzureOpenAI.ipynb
+/examples/embeddings/custom_embeddings.ipynb
+```
diff --git a/docs/core_modules/model_modules/embeddings/root.md b/docs/core_modules/model_modules/embeddings/root.md
new file mode 100644
index 0000000000..cafc81a118
--- /dev/null
+++ b/docs/core_modules/model_modules/embeddings/root.md
@@ -0,0 +1,42 @@
+# Embeddings
+
+## Concept
+Embeddings are used in LlamaIndex to represent your documents using a sophisticated numerical representation. Embedding models take text as input, and return a long list of numbers used to capture the semantics of the text. These embedding models have been trained to represent text this way, and help enable many applications, including search!
+
+At a high level, if a user asks a question about dogs, then the embedding for that question will be highly similar to text that talks about dogs.
+
+When calculating the similarity between embeddings, there are many methods to use (dot product, cosine similarity, etc.). By default, LlamaIndex uses cosine similarity when comparing embeddings.
+
+There are many embedding models to pick from. By default, LlamaIndex uses `text-embedding-ada-002` from OpenAI. We also support any embedding model offered by Langchain [here](https://python.langchain.com/docs/modules/data_connection/text_embedding/), as well as providing an easy to extend base class for implementing your own embeddings.
+
+## Usage Pattern
+
+Most commonly in LlamaIndex, embedding models will be specified in the `ServiceContext` object, and then used in a vector index. The embedding model will be used to embed the documents used during index construction, as well as embedding any queries you make using the query engine later on.
+
+```python
+from llama_index import ServiceContext
+from llama_index.embeddings import OpenAIEmbedding
+
+embed_model = OpenAIEmbedding()
+service_context = ServiceContext.from_defaults(embed_model=embed_model)
+```
+
+You can find more usage details and available customization options below.
+
+```{toctree}
+---
+maxdepth: 1
+---
+usage_pattern.md
+```
+
+## Modules
+
+We support integrations with OpenAI, Azure, and anything LangChain offers. Details below.
+
+```{toctree}
+---
+maxdepth: 1
+---
+modules.md
+```
diff --git a/docs/core_modules/model_modules/embeddings/usage_pattern.md b/docs/core_modules/model_modules/embeddings/usage_pattern.md
new file mode 100644
index 0000000000..0e389b9deb
--- /dev/null
+++ b/docs/core_modules/model_modules/embeddings/usage_pattern.md
@@ -0,0 +1,101 @@
+# Usage Pattern
+
+## Getting Started
+
+The most common usage for an embedding model will be setting it in the service context object, and then using it to construct an index and query. The input documents will be broken into nodes, and the embedding model will generate an embedding for each node.
+
+By default, LlamaIndex will use `text-embedding-ada-002`, which is what the example below manually sets up for you.
+
+```python
+from llama_index import ServiceContext, VectorStoreIndex, SimpleDirectoryReader
+from llama_index.embeddings import OpenAIEmbedding
+
+embed_model = OpenAIEmbedding()
+service_context = ServiceContext.from_defaults(embed_model=embed_model)
+
+# optionally set a global service context to avoid passing it into other objects every time
+from llama_index import set_global_service_context
+set_global_service_context(service_context)
+
+documents = SimpleDirectoryReader("./data").load_data()
+
+index = VectorStoreIndex.from_documents(documents)
+```
+
+Then, at query time, the embedding model will be used again to embed the query text.
+
+```python
+query_engine = index.as_query_engine()
+
+response = query_engine.query("query string")
+```
+
+## Customization
+
+### Batch Size
+
+By default, embeddings requests are sent to OpenAI in batches of 10. For some users, this may (rarely) incur a rate limit. For other users embedding many documents, this batch size may be too small.
+
+```python
+# set the batch size to 42
+embed_model = OpenAIEmbedding(embed_batch_size=42)
+```
+
+### Embedding Model Integrations
+
+We also support any embeddings offered by Langchain [here](https://python.langchain.com/docs/modules/data_connection/text_embedding/), using our `LangchainEmbedding` wrapper class.
+
+The example below loads a model from Hugging Face, using Langchain's embedding class.
+
+```python
+from langchain.embeddings.huggingface import HuggingFaceEmbeddings
+from llama_index import LangchainEmbedding, ServiceContext
+
+embed_model = LangchainEmbedding(
+  HuggingFaceEmbeddings("sentence-transformers/all-mpnet-base-v2")
+)
+service_context = ServiceContext.from_defaults(embed_model=embed_model)
+```
+
+### Custom Embedding Model
+
+If you wanted to use embeddings not offered by LlamaIndex or Langchain, you can also extend our base embeddings class and implement your own!
+
+The example below uses Instructor Embeddings ([install/setup details here](https://huggingface.co/hkunlp/instructor-large)), and implements a custom embeddings class. Instructor embeddings work by providing text, as well as "instructions" on the domain of the text to embed. This is helpful when embedding text from a very specific and specialized topic.
+
+```python
+from typing import Any, List
+from InstructorEmbedding import INSTRUCTOR
+from llama_index.embeddings.base import BaseEmbedding
+
+class InstructorEmbeddings(BaseEmbedding):
+  def __init__(
+    self, 
+    instructor_model_name: str = "hkunlp/instructor-large",
+    instruction: str = "Represent the Computer Science documentation or question:",
+    **kwargs: Any,
+  ) -> None:
+    self._model = INSTRUCTOR(instructor_model_name)
+    self._instruction = instruction
+    super().__init__(**kwargs)
+
+  def _get_query_embedding(self, query: str) -> List[float]:
+    embeddings = self._model.encode([[self._instruction, query]])
+    return embeddings[0]
+
+  def _get_text_embedding(self, text: str) -> List[float]:
+    embeddings = self._model.encode([[self._instruction, text]])
+    return embeddings[0]
+
+  def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
+    embeddings = self._model.encode([[self._instruction, text] for text in texts])
+    return embeddings
+```
+
+## Standalone Usage
+
+You can also use embeddings as a standalone module for your project, existing application, or general testing and exploration.
+
+```python
+embeddings = embed_model.get_text_embedding("It is raining cats and dogs here!")
+```
diff --git a/docs/core_modules/model_modules/llms/modules.md b/docs/core_modules/model_modules/llms/modules.md
new file mode 100644
index 0000000000..9101fa9539
--- /dev/null
+++ b/docs/core_modules/model_modules/llms/modules.md
@@ -0,0 +1,42 @@
+# Modules
+
+We support integrations with OpenAI, Hugging Face, PaLM, and more.
+
+## OpenAI
+```{toctree}
+---
+maxdepth: 1
+---
+/examples/llm/openai.ipynb
+/examples/llm/azure_openai.ipynb
+
+```
+
+## Hugging Face
+```{toctree}
+---
+maxdepth: 1
+---
+/examples/customization/llms/SimpleIndexDemo-Huggingface_camel.ipynb
+/examples/customization/llms/SimpleIndexDemo-Huggingface_stablelm.ipynb
+
+```
+
+
+## PaLM
+
+```{toctree}
+---
+maxdepth: 1
+---
+/examples/llm/palm.ipynb
+
+```
+
+## LangChain
+
+```{toctree}
+---
+maxdepth: 1
+---
+/examples/llm/langchain.ipynb
\ No newline at end of file
diff --git a/docs/core_modules/model_modules/llms/root.md b/docs/core_modules/model_modules/llms/root.md
new file mode 100644
index 0000000000..1e09eec31c
--- /dev/null
+++ b/docs/core_modules/model_modules/llms/root.md
@@ -0,0 +1,49 @@
+# LLM
+
+## Concept
+Picking the proper Large Language Model (LLM) is one of the first steps you need to consider when building any LLM application over your data.
+
+LLMs are a core component of LlamaIndex. They can be used as standalone modules or plugged into other core LlamaIndex modules (indices, retrievers, query engines). They are always used during the response synthesis step (e.g. after retrieval). Depending on the type of index being used, LLMs may also be used during index construction, insertion, and query traversal.
+
+LlamaIndex provides a unified interface for defining LLM modules, whether it's from OpenAI, Hugging Face, or LangChain, so that you 
+don't have to write the boilerplate code of defining the LLM interface yourself. This interface consists of the following (more details below):
+- Support for **text completion** and **chat** endpoints (details below)
+- Support for **streaming** and **non-streaming** endpoints
+- Support for **synchronous** and **asynchronous** endpoints
+
+
+## Usage Pattern
+
+The following code snippet shows how you can get started using LLMs.
+
+```python
+from llama_index.llms import OpenAI
+
+# non-streaming
+resp = OpenAI().complete('Paul Graham is ')
+print(resp)
+```
+
+You can use the LLM as a standalone module or with other LlamaIndex abstractions. Check out our guide below.
+
+```{toctree}
+---
+maxdepth: 1
+---
+usage_standalone.md
+usage_custom.md
+```
+
+
+## Modules
+
+We support integrations with OpenAI, Hugging Face, PaLM, and more.
+
+```{toctree}
+---
+maxdepth: 2
+---
+modules.md
+```
+
+
diff --git a/docs/how_to/customization/custom_llms.md b/docs/core_modules/model_modules/llms/usage_custom.md
similarity index 88%
rename from docs/how_to/customization/custom_llms.md
rename to docs/core_modules/model_modules/llms/usage_custom.md
index 551dd64a6b..5cd155a6fc 100644
--- a/docs/how_to/customization/custom_llms.md
+++ b/docs/core_modules/model_modules/llms/usage_custom.md
@@ -1,14 +1,9 @@
-# Defining LLMs
+# Customizing LLMs within LlamaIndex Abstractions
 
-The goal of LlamaIndex is to provide a toolkit of data structures that can organize external information in a manner that
-is easily compatible with the prompt limitations of an LLM. Therefore LLMs are always used to construct the final
-answer.
-Depending on the [type of index](/reference/indices.rst) being used,
-LLMs may also be used during index construction, insertion, and query traversal.
+You can plug in these LLM abstractions within our other modules in LlamaIndex (indexes, retrievers, query engines, agents) which allow you to build advanced workflows over your data.
 
 By default, we use OpenAI's `text-davinci-003` model. But you may choose to customize
 the underlying LLM being used.
-We support a growing collection of integrations, as well as LangChain's [LLM](https://python.langchain.com/en/latest/modules/models/llms.html) modules.
 
 Below we show a few examples of LLM customization. This includes
 
@@ -171,8 +166,8 @@ A full API reference can be found [here](../../reference/llm_predictor.rst).
 
 Several example notebooks are also listed below:
 
-- [StableLM](../../examples/customization/llms/SimpleIndexDemo-Huggingface_stablelm.ipynb)
-- [Camel](../../examples/customization/llms/SimpleIndexDemo-Huggingface_camel.ipynb)
+- [StableLM](/examples/customization/llms/SimpleIndexDemo-Huggingface_stablelm.ipynb)
+- [Camel](/examples/customization/llms/SimpleIndexDemo-Huggingface_camel.ipynb)
 
 ## Example: Using a Custom LLM Model - Advanced
 
@@ -249,16 +244,5 @@ Using this method, you can use any LLM. Maybe you have one running locally, or r
 
 Note that you may have to adjust the internal prompts to get good performance. Even then, you should be using a sufficiently large LLM to ensure it's capable of handling the complex queries that LlamaIndex uses internally, so your mileage may vary.
 
-A list of all default internal prompts is available [here](https://github.com/jerryjliu/llama_index/blob/main/llama_index/prompts/default_prompts.py), and chat-specific prompts are listed [here](https://github.com/jerryjliu/llama_index/blob/main/llama_index/prompts/chat_prompts.py). You can also implement your own custom prompts, as described [here](https://gpt-index.readthedocs.io/en/latest/how_to/customization/custom_prompts.html).
+A list of all default internal prompts is available [here](https://github.com/jerryjliu/llama_index/blob/main/llama_index/prompts/default_prompts.py), and chat-specific prompts are listed [here](https://github.com/jerryjliu/llama_index/blob/main/llama_index/prompts/chat_prompts.py). You can also implement your own custom prompts, as described [here](/core_modules/service_modules/prompts.md).
 
-```{toctree}
----
-caption: Examples
-maxdepth: 1
----
-
-../../examples/customization/llms/AzureOpenAI.ipynb
-../../examples/customization/llms/SimpleIndexDemo-Huggingface_camel.ipynb
-../../examples/customization/llms/SimpleIndexDemo-Huggingface_stablelm.ipynb
-../../examples/customization/llms/SimpleIndexDemo-ChatGPT.ipynb
-```
diff --git a/docs/core_modules/model_modules/llms/usage_standalone.md b/docs/core_modules/model_modules/llms/usage_standalone.md
new file mode 100644
index 0000000000..3beb398c11
--- /dev/null
+++ b/docs/core_modules/model_modules/llms/usage_standalone.md
@@ -0,0 +1,35 @@
+# Using LLMs as standalone modules
+
+You can use our LLM modules on their own.
+
+## Text Completion Example
+
+```python
+from llama_index.llms import OpenAI
+
+# non-streaming
+resp = OpenAI().complete('Paul Graham is ')
+print(resp)
+
+# using streaming endpoint
+from llama_index.llms import OpenAI
+llm = OpenAI()
+resp = llm.stream_complete('Paul Graham is ')
+for delta in resp:
+    print(delta, end='')
+```
+
+## Chat Example
+
+```python
+from llama_index.llms import ChatMessage, OpenAI
+
+messages = [
+    ChatMessage(role="system", content="You are a pirate with a colorful personality"),
+    ChatMessage(role="user", content="What is your name"),
+]
+resp = OpenAI().chat(messages)
+print(resp)
+```
+
+Check out our [modules section](modules.md) for usage guides for each LLM.
diff --git a/docs/how_to/customization/custom_prompts.md b/docs/core_modules/model_modules/prompts.md
similarity index 59%
rename from docs/how_to/customization/custom_prompts.md
rename to docs/core_modules/model_modules/prompts.md
index 12ecf8241c..ca551647e5 100644
--- a/docs/how_to/customization/custom_prompts.md
+++ b/docs/core_modules/model_modules/prompts.md
@@ -1,12 +1,19 @@
-# Defining Prompts
+# Prompts
+
+## Concept
 
 Prompting is the fundamental input that gives LLMs their expressive power. LlamaIndex uses prompts to build the index, do insertion, 
 perform traversal during querying, and to synthesize the final answer.
 
-LlamaIndex uses a set of [default prompt templates](https://github.com/jerryjliu/llama_index/blob/main/llama_index/prompts/default_prompts.py) that works well out of the box.
-Users may also provide their own prompt templates to further customize the behavior of the framework.
+LlamaIndex uses a set of [default prompt templates](https://github.com/jerryjliu/llama_index/blob/main/llama_index/prompts/default_prompts.py) that work well out of the box.
+
+In addition, there are some prompts written and used specifically for chat models like `gpt-3.5-turbo` [here](https://github.com/jerryjliu/llama_index/blob/main/llama_index/prompts/chat_prompts.py).
+
+Users may also provide their own prompt templates to further customize the behavior of the framework. The best method for customizing is copying the default prompt from the link above, and using that as the base for any modifications.
+
+## Usage Pattern
 
-## Defining a custom prompt
+### Defining a custom prompt
 
 Defining a custom prompt is as simple as creating a format string
 
@@ -25,41 +32,58 @@ qa_template = Prompt(template)
 
 > Note: you may see references to legacy prompt subclasses such as `QuestionAnswerPrompt`, `RefinePrompt`. These have been deprecated (and now are type aliases of `Prompt`). Now you can directly specify `Prompt(template)` to construct custom prompts. But you still have to make sure the template string contains the expected parameters (e.g. `{context_str}` and `{query_str}`) when replacing a default question answer prompt.
 
-## Passing custom prompts into the pipeline
+### Passing custom prompts into the pipeline
 
 Since LlamaIndex is a multi-step pipeline, it's important to identify the operation that you want to modify and pass in the custom prompt at the right place.
+
 At a high-level, prompts are used in 1) index construction, and 2) query engine execution
 
+The most commonly used prompts will be the `text_qa_template` and the `refine_template`. 
+
+- `text_qa_template` - used to get an initial answer to a query using retrieved nodes
+- `refine_template` - used when the retrieved text does not fit into a single LLM call with `response_mode="compact"` (the default), or when more than one node is retrieved using `response_mode="refine"`. The answer from the first query is inserted as an `existing_answer`, and the LLM must update or repeat the existing answer based on the new context.
 
-### Modify prompts used in index construction
+#### Modify prompts used in index construction
 Different indices use different types of prompts during construction (some don't use prompts at all). 
 For instance, `TreeIndex` uses a `SummaryPrompt` to hierarchically
 summarize the nodes, and `KeywordTableIndex` uses a `KeywordExtractPrompt` to extract keywords.
 
 There are two equivalent ways to override the prompts:
+
 1. via the default nodes constructor 
+
 ```python
 index = TreeIndex(nodes, summary_template=<custom_prompt>)
 ```
 2. via the documents constructor.
+
 ```python
 index = TreeIndex.from_documents(docs, summary_template=<custom_prompt>)
 ```
 
 For more details on which index uses which prompts, please visit
-[Index class references](/reference/indices.rst).
+[Index class references](/api_reference/indices.rst).
+
+#### Modify prompts used in query engine
+More commonly, prompts are used at query-time (i.e. for executing a query against an index and synthesizing the final response). 
 
+There are also two equivalent ways to override the prompts:
 
-### Modify prompts used in query engine
-More commonly, prompts are used at query-time (i.e. for executing a query against an index and synthesizing the final response). There are also two equivalent ways to override the prompts:
 1. via the high-level API
 ```python
-query_engine = index.as_query_engine(text_qa_template=<custom_prompt>)
+query_engine = index.as_query_engine(
+    text_qa_template=<custom_qa_prompt>,
+    refine_template=<custom_refine_prompt>
+)
 ```
 2. via the low-level composition API
+
 ```python
 retriever = index.as_retriever()
-synth = ResponseSynthesizer.from_args(text_qa_template=<custom_prompt>)
+synth = get_response_synthesizer(
+    text_qa_template=<custom_qa_prompt>,
+    refine_template=<custom_refine_prompt>
+)
 query_engine = RetrieverQueryEngine(retriever, response_synthesizer)
 ```
 
@@ -67,44 +91,16 @@ The two approaches above are equivalent, where 1 is essentially syntactic sugar
 
 
 For more details on which classes use which prompts, please visit
-[Query class references](/reference/query.rst).
-
-
-## Full Example
-
-An example can be found in [this notebook](https://github.com/jerryjliu/llama_index/blob/main/examples/paul_graham_essay/TestEssay.ipynb).
+[Query class references](/api_reference/query.rst).
 
+Check out the [reference documentation](/api_reference/prompts.rst) for a full set of all prompts.
 
-A corresponding snippet is below. We show how to define a custom prompt for question answer which
-requires both a `context_str` and `query_str` field. The prompt is passed in during query-time.
-
-```python
-from llama_index import Prompt, VectorStoreIndex, SimpleDirectoryReader
-
-# load documents
-documents = SimpleDirectoryReader('data').load_data()
-
-# define custom Prompt
-TEMPLATE_STR = (
-    "We have provided context information below. \n"
-    "---------------------\n"
-    "{context_str}"
-    "\n---------------------\n"
-    "Given this information, please answer the question: {query_str}\n"
-)
-QA_TEMPLATE = Prompt(TEMPLATE_STR)
-
-# Build index 
-index = VectorStoreIndex.from_documents(documents)
-
-# Configure query engine
-query_engine = index.as_query_engine(text_qa_template=QA_TEMPLATE)
-
-# Execute query
-response = query_engine.query("What did the author do growing up?")
-print(response)
-
-```
-
+## Modules
 
-Check out the [reference documentation](/reference/prompts.rst) for a full set of all prompts.
+```{toctree}
+---
+maxdepth: 1
+---
+/examples/customization/prompts/completion_prompts.ipynb
+/examples/customization/prompts/chat_prompts.ipynb
+```
\ No newline at end of file
diff --git a/docs/how_to/chat_engine/modules.md b/docs/core_modules/query_modules/chat_engines/modules.md
similarity index 100%
rename from docs/how_to/chat_engine/modules.md
rename to docs/core_modules/query_modules/chat_engines/modules.md
diff --git a/docs/how_to/chat_engine/root.md b/docs/core_modules/query_modules/chat_engines/root.md
similarity index 58%
rename from docs/how_to/chat_engine/root.md
rename to docs/core_modules/query_modules/chat_engines/root.md
index 0e93b2bf6f..4a621bf500 100644
--- a/docs/how_to/chat_engine/root.md
+++ b/docs/core_modules/query_modules/chat_engines/root.md
@@ -1,15 +1,17 @@
-# 💬 Chat Engine
+# Chat Engine
 
 ## Concept
 Chat engine is a high-level interface for having a conversation with your data
 (multiple back-and-forth instead of a single question & answer).
 Think ChatGPT, but augmented with your knowledge base.  
 
-Conceptually, it is a **stateful** analogy of a [Query Engine](/how_to/query_engine/root.md). 
+Conceptually, it is a **stateful** analogy of a [Query Engine](../query_engine/root.md). 
 By keeping track of the conversation history, it can answer questions with past context in mind.  
 
 
-> If you want to ask standalone question over your data (i.e. without keeping track of conversation history), use [Query Engine](/how_to/query_engine/root.md) instead.  
+```{tip}
+If you want to ask a standalone question over your data (i.e. without keeping track of conversation history), use [Query Engine](../query_engine/root.md) instead.  
+```
 
 ## Usage Pattern
 Get started with:
@@ -18,6 +20,14 @@ chat_engine = index.as_chat_engine()
 response = chat_engine.chat("Tell me a joke.")
 ```
 
+To stream response:
+```python
+chat_engine = index.as_chat_engine()
+streaming_response = chat_engine.stream_chat("Tell me a joke.")
+streaming_response.print_response_stream() 
+```
+
+
 ```{toctree}
 ---
 maxdepth: 2
diff --git a/docs/how_to/chat_engine/usage_pattern.md b/docs/core_modules/query_modules/chat_engines/usage_pattern.md
similarity index 77%
rename from docs/how_to/chat_engine/usage_pattern.md
rename to docs/core_modules/query_modules/chat_engines/usage_pattern.md
index 603c0fc5e6..bc14799c7e 100644
--- a/docs/how_to/chat_engine/usage_pattern.md
+++ b/docs/core_modules/query_modules/chat_engines/usage_pattern.md
@@ -7,7 +7,9 @@ Build a chat engine from index:
 chat_engine = index.as_chat_engine()
 ```
 
-> Note: To learn how to build an index, see [Index](/how_to/index/root.md)
+```{tip}
+To learn how to build an index, see [Index](/core_modules/data_modules/index/root.md)
+```
 
 Have a conversation with your data:
 ```python
@@ -85,3 +87,22 @@ chat_engine = CondenseQuestionChatEngine.from_defaults(
 )
 ```
 
+
+
+### Streaming
+To enable streaming, you simply need to call the `stream_chat` endpoint instead of the `chat` endpoint. 
+
+```{warning}
+This is somewhat inconsistent with the query engine (where you pass in a `streaming=True` flag). We are working on making the behavior more consistent! 
+```
+
+```python
+chat_engine = index.as_chat_engine()
+streaming_response = chat_engine.stream_chat("Tell me a joke.")
+streaming_response.print_response_stream() 
+```
+
+See an [end-to-end tutorial](/examples/customization/streaming/chat_engine_condense_question_stream_response.ipynb)
+
+
+
diff --git a/docs/core_modules/query_modules/node_postprocessors/modules.md b/docs/core_modules/query_modules/node_postprocessors/modules.md
new file mode 100644
index 0000000000..62ea932214
--- /dev/null
+++ b/docs/core_modules/query_modules/node_postprocessors/modules.md
@@ -0,0 +1,222 @@
+# Modules
+
+## SimilarityPostprocessor
+
+Used to remove nodes that are below a similarity score threshold.
+
+```python
+from llama_index.indices.postprocessor import SimilarityPostprocessor
+
+postprocessor = SimilarityPostprocessor(similarity_cutoff=0.7)
+
+postprocessor.postprocess_nodes(nodes)
+```
+
+## KeywordNodePostprocessor
+
+Used to ensure certain keywords are either excluded or included.
+
+```python
+from llama_index.indices.postprocessor import KeywordNodePostprocessor
+
+postprocessor = KeywordNodePostprocessor(
+  required_keywords=["word1", "word2"],
+  exclude_keywords=["word3", "word4"]
+)
+
+postprocessor.postprocess_nodes(nodes)
+```
+
+## SentenceEmbeddingOptimizer
+
+This postprocessor optimizes token usage by removing sentences that are not relevant to the query (this is done using embeddings).
+
+The percentile cutoff is a measure for using the top percentage of relevant sentences.
+
+The threshold cutoff can be specified instead, which uses a raw similarity cutoff for picking which sentences to keep.
+
+```python
+from llama_index.indices.postprocessor import SentenceEmbeddingOptimizer
+
+postprocessor = SentenceEmbeddingOptimizer(
+  embed_model=service_context.embed_model,
+  percentile_cutoff=0.5,
+  # threshold_cutoff=0.7
+)
+
+postprocessor.postprocess_nodes(nodes)
+```
+
+A full notebook guide can be found [here](/examples/node_postprocessor/OptimizerDemo.ipynb)
+
+## CohereRerank
+
+Uses the "Cohere ReRank" functionality to re-order nodes, and returns the top N nodes.
+
+```python
+from llama_index.indices.postprocessor import CohereRerank
+
+postprocessor = CohereRerank(
+  top_n=2,
+  model="rerank-english-v2.0",
+  api_key="YOUR COHERE API KEY"
+)
+
+postprocessor.postprocess_nodes(nodes)
+```
+
+Full notebook guide is available [here](/examples/node_postprocessor/CohereRerank.ipynb).
+
+## LLM Rerank
+
+Uses a LLM to re-order nodes by asking the LLM to return the relevant documents and a score of how relevant they are. Returns the top N ranked nodes.
+
+```python
+from llama_index.indices.postprocessor import LLMRerank
+
+postprocessor = LLMRerank(
+  top_n=2,
+  service_context=service_context,
+)
+
+postprocessor.postprocess_nodes(nodes)
+```
+
+Full notebook guide is available [here for Gatsby](/examples/node_postprocessor/LLMReranker-Gatsby.ipynb) and [here for Lyft 10K documents](/examples/node_postprocessor/LLMReranker-Lyft-10k.ipynb).
+
+## FixedRecencyPostprocessor
+
+This postprocessor returns the top K nodes sorted by date. This assumes there is a `date` field to parse in the metadata of each node.
+
+```python
+from llama_index.indices.postprocessor import FixedRecencyPostprocessor
+
+postprocessor = FixedRecencyPostprocessor(
+  top_k=1,
+  date_key="date"  # the key in the metadata to find the date
+)
+
+postprocessor.postprocess_nodes(nodes)
+```
+
+![](/_static/node_postprocessors/recency.png)
+
+A full notebook guide is available [here](/examples/node_postprocessor/RecencyPostprocessorDemo.ipynb).
+
+## EmbeddingRecencyPostprocessor
+
+This postprocessor returns the top K nodes after sorting by date and removing older nodes that are too similar after measuring embedding similarity.
+
+```python
+from llama_index.indices.postprocessor import EmbeddingRecencyPostprocessor
+
+postprocessor = EmbeddingRecencyPostprocessor(
+  service_context=service_context,
+  date_key="date",
+  similarity_cutoff=0.7
+)
+
+postprocessor.postprocess_nodes(nodes)
+```
+
+A full notebook guide is available [here](/examples/node_postprocessor/RecencyPostprocessorDemo.ipynb).
+
+## TimeWeightedPostprocessor
+
+This postprocessor returns the top K nodes applying a time-weighted rerank to each node. Each time a node is retrieved, the time it was retrieved is recorded. This biases search to favor information that has not been returned in a query yet.
+
+```python
+from llama_index.indices.postprocessor import TimeWeightedPostprocessor
+
+postprocessor = TimeWeightedPostprocessor(
+  time_decay=0.99,
+  top_k=1
+)
+
+postprocessor.postprocess_nodes(nodes)
+```
+
+A full notebook guide is available [here](/examples/node_postprocessor/TimeWeightedPostprocessorDemo.ipynb).
+
+## (Beta) PIINodePostprocessor
+
+The PII (Personally Identifiable Information) postprocessor removes information that might be a security risk. It does this by using NER (either with a dedicated NER model, or with a local LLM model).
+
+### LLM Version
+
+```python
+from llama_index.indices.postprocessor import PIINodePostprocessor
+
+postprocessor = PIINodePostprocessor(
+  service_context=service_context,  # this should be setup with an LLM you trust
+)
+
+postprocessor.postprocess_nodes(nodes)
+```
+
+### NER Version
+
+This version uses the default local model from Hugging Face that is loaded when you run `pipeline("ner")`.
+
+```python
+from llama_index.indices.postprocessor import NERPIINodePostprocessor
+
+postprocessor = NERPIINodePostprocessor()
+
+postprocessor.postprocess_nodes(nodes)
+```
+
+A full notebook guide for both can be found [here](/examples/node_postprocessor/PII.ipynb).
+
+## (Beta) PrevNextNodePostprocessor
+
+Uses pre-defined settings to read the `Node` relationships and fetch either all nodes that come previously, next, or both.
+
+This is useful when you know the relationships point to important data (either before, after, or both) that should be sent to the LLM if that node is retrieved.
+
+```python
+from llama_index.indices.postprocessor import PrevNextNodePostprocessor
+
+postprocessor = PrevNextNodePostprocessor(
+  docstore=index.docstore,
+  num_nodes=1,  # number of nodes to fetch when looking forwards or backwards
+  mode="next"   # can be either 'next', 'previous', or 'both'
+)
+
+postprocessor.postprocess_nodes(nodes)
+```
+
+![](/_static/node_postprocessors/prev_next.png)
+
+## (Beta) AutoPrevNextNodePostprocessor
+
+The same as PrevNextNodePostprocessor, but lets the LLM decide the mode (next, previous, or both).
+
+```python
+from llama_index.indices.postprocessor import AutoPrevNextNodePostprocessor
+
+postprocessor = AutoPrevNextNodePostprocessor(
+  docstore=index.docstore,
+  service_context=service_context,
+  num_nodes=1)  # number of nodes to fetch when looking forwards or backwards
+
+postprocessor.postprocess_nodes(nodes)
+```
+
+A full example notebook is available [here](/examples/node_postprocessor/PrevNextPostprocessorDemo.ipynb).
+
+## All Notebooks
+
+```{toctree}
+---
+maxdepth: 1
+---
+/examples/node_postprocessor/OptimizerDemo.ipynb
+/examples/node_postprocessor/CohereRerank.ipynb
+/examples/node_postprocessor/LLMReranker-Lyft-10k.ipynb
+/examples/node_postprocessor/LLMReranker-Gatsby.ipynb
+/examples/node_postprocessor/RecencyPostprocessorDemo.ipynb
+/examples/node_postprocessor/TimeWeightedPostprocessorDemo.ipynb
+/examples/node_postprocessor/PII.ipynb
+/examples/node_postprocessor/PrevNextPostprocessorDemo.ipynb
+```
\ No newline at end of file
diff --git a/docs/core_modules/query_modules/node_postprocessors/root.md b/docs/core_modules/query_modules/node_postprocessors/root.md
new file mode 100644
index 0000000000..aa77fb9724
--- /dev/null
+++ b/docs/core_modules/query_modules/node_postprocessors/root.md
@@ -0,0 +1,49 @@
+# Node Postprocessor
+
+## Concept
+Node postprocessors are a set of modules that take a set of nodes, and apply some kind of transformation or filtering before returning them.
+
+In LlamaIndex, node postprocessors are most commonly applied within a query engine, after the node retrieval step and before the response synthesis step.
+
+LlamaIndex offers several node postprocessors for immediate use, while also providing a simple API for adding your own custom postprocessors.
+
+```{tip}
+Confused about where node postprocessor fits in the pipeline? Read about [high-level concepts](/getting_started/concepts.md)
+```
+
+## Usage Pattern
+
+An example of using a node postprocessor is below:
+
+```python
+from llama_index.indices.postprocessor import SimilarityPostprocessor
+from llama_index.schema import Node, NodeWithScore
+
+nodes = [
+  NodeWithScore(node=Node(text="text"), score=0.7),
+  NodeWithScore(node=Node(text="text"), score=0.8)
+]
+
+# filter nodes below 0.75 similarity score
+processor = SimilarityPostprocessor(similarity_cutoff=0.75)
+filtered_nodes = processor.postprocess_nodes(nodes)
+```
+
+You can find more details using post processors and how to build your own below.
+
+```{toctree}
+---
+maxdepth: 2
+---
+usage_pattern.md
+```
+
+## Modules
+Below you can find guides for each node postprocessor.
+
+```{toctree}
+---
+maxdepth: 2
+---
+modules.md
+```
\ No newline at end of file
diff --git a/docs/core_modules/query_modules/node_postprocessors/usage_pattern.md b/docs/core_modules/query_modules/node_postprocessors/usage_pattern.md
new file mode 100644
index 0000000000..5a43a784ee
--- /dev/null
+++ b/docs/core_modules/query_modules/node_postprocessors/usage_pattern.md
@@ -0,0 +1,93 @@
+# Usage Pattern
+
+Most commonly, node-postprocessors will be used in a query engine, where they are applied to the nodes returned from a retriever, and before the response synthesis step.
+
+
+## Using with a Query Engine
+
+```python
+from llama_index import VectorStoreIndex, SimpleDirectoryReader
+from llama_index.indices.postprocessor import TimeWeightedPostprocessor
+
+documents = SimpleDirectoryReader("./data").load_data()
+
+index = VectorStoreIndex.from_documents(documents)
+
+query_engine = index.as_query_engine(
+  node_postprocessors=[
+    TimeWeightedPostprocessor(
+        time_decay=0.5, time_access_refresh=False, top_k=1
+    )
+  ]
+)
+
+# all node post-processors will be applied during each query
+response = query_engine.query("query string")
+```
+
+## Using with Retrieved Nodes
+
+Or used as a standalone object for filtering retrieved nodes:
+
+```python
+from llama_index.indices.postprocessor import SimilarityPostprocessor
+
+nodes = index.as_retriever().retrieve("query string")
+
+# filter nodes below 0.75 similarity score
+processor = SimilarityPostprocessor(similarity_cutoff=0.75)
+filtered_nodes = processor.postprocess_nodes(nodes)
+```
+
+## Using with your own nodes
+
+As you may have noticed, the postprocessors take `NodeWithScore` objects as inputs, which is just a wrapper class with a `Node` and a `score` value.
+
+```python
+from llama_index.indices.postprocessor import SimilarityPostprocessor
+from llama_index.schema import Node, NodeWithScore
+
+nodes = [
+  NodeWithScore(node=Node(text="text"), score=0.7),
+  NodeWithScore(node=Node(text="text"), score=0.8)
+]
+
+# filter nodes below 0.75 similarity score
+processor = SimilarityPostprocessor(similarity_cutoff=0.75)
+filtered_nodes = processor.postprocess_nodes(nodes)
+```
+
+## Custom Node PostProcessor
+
+The base class is `BaseNodePostprocessor`, and the API interface is very simple: 
+
+```python
+class BaseNodePostprocessor:
+    """Node postprocessor."""
+
+    @abstractmethod
+    def postprocess_nodes(
+        self, nodes: List[NodeWithScore], query_bundle: Optional[QueryBundle]
+    ) -> List[NodeWithScore]:
+        """Postprocess nodes."""
+```
+
+A dummy node-postprocessor can be implemented in just a few lines of code:
+
+```python
+from llama_index import QueryBundle
+from llama_index.indices.postprocessor.base import BaseNodePostprocessor
+from llama_index.schema import NodeWithScore
+
+class DummyNodePostprocessor:
+
+    def postprocess_nodes(
+        self, nodes: List[NodeWithScore], query_bundle: Optional[QueryBundle]
+    ) -> List[NodeWithScore]:
+        
+        # subtracts 1 from the score
+        for n in nodes:
+            n.score -= 1
+
+        return nodes
+```
diff --git a/docs/how_to/query_engine/advanced/query_transformations.md b/docs/core_modules/query_modules/query_engine/advanced/query_transformations.md
similarity index 100%
rename from docs/how_to/query_engine/advanced/query_transformations.md
rename to docs/core_modules/query_modules/query_engine/advanced/query_transformations.md
diff --git a/docs/how_to/query_engine/modules.md b/docs/core_modules/query_modules/query_engine/modules.md
similarity index 100%
rename from docs/how_to/query_engine/modules.md
rename to docs/core_modules/query_modules/query_engine/modules.md
diff --git a/docs/how_to/query_engine/response_modes.md b/docs/core_modules/query_modules/query_engine/response_modes.md
similarity index 92%
rename from docs/how_to/query_engine/response_modes.md
rename to docs/core_modules/query_modules/query_engine/response_modes.md
index 7b7385ef68..d06e31783b 100644
--- a/docs/how_to/query_engine/response_modes.md
+++ b/docs/core_modules/query_modules/query_engine/response_modes.md
@@ -17,4 +17,4 @@ Right now, we support the following options:
     responses. Good for when you need to run the same query separately against each text
     chunk.
 
-See [Response Synthesis](/how_to/query_engine/advanced/response_synthesis.md) to learn more.
\ No newline at end of file
+See [Response Synthesizer](/core_modules/query_modules/response_synthesizers/root.md) to learn more.
\ No newline at end of file
diff --git a/docs/how_to/query_engine/root.md b/docs/core_modules/query_modules/query_engine/root.md
similarity index 51%
rename from docs/how_to/query_engine/root.md
rename to docs/core_modules/query_modules/query_engine/root.md
index 2c7bf437e3..60aee9e167 100644
--- a/docs/how_to/query_engine/root.md
+++ b/docs/core_modules/query_modules/query_engine/root.md
@@ -1,13 +1,15 @@
-# ❓ Query Engine
+# Query Engine
 
 ## Concept
 Query engine is a generic interface that allows you to ask question over your data.
-> If you want to have a conversation with your data (multiple back-and-forth instead of a single question & answer), take a look at [Chat Engine](/how_to/chat_engine/root.md)  
 
 A query engine takes in a natural language query, and returns a rich response.
-It is most often (but not always) built on one or many [Indices](/how_to/index/root.md) via [Retrievers](/how_to/retriever/root.md).
+It is most often (but not always) built on one or many [Indices](/core_modules/data_modules/index/root.md) via [Retrievers](/core_modules/query_modules/retriever/root.md).
 You can compose multiple query engines to achieve more advanced capability.
 
+```{tip}
+If you want to have a conversation with your data (multiple back-and-forth instead of a single question & answer), take a look at [Chat Engine](/core_modules/query_modules/chat_engines/root.md)  
+```
 
 ## Usage Pattern
 Get started with:
@@ -16,6 +18,13 @@ query_engine = index.as_query_engine()
 response = query_engine.query("Who is Paul Graham.")
 ```
 
+To stream response:
+```python
+query_engine = index.as_query_engine(streaming=True)
+streaming_response = query_engine.query("Who is Paul Graham.")
+streaming_response.print_response_stream() 
+```
+
 ```{toctree}
 ---
 maxdepth: 2
@@ -32,10 +41,11 @@ maxdepth: 3
 modules.md
 ```
 
-## Advanced Concepts
+
+## Supporting Modules
 ```{toctree}
 ---
 maxdepth: 2
 ---
-advanced/root.md
+supporting_modules.md
 ```
\ No newline at end of file
diff --git a/docs/how_to/customization/streaming.md b/docs/core_modules/query_modules/query_engine/streaming.md
similarity index 89%
rename from docs/how_to/customization/streaming.md
rename to docs/core_modules/query_modules/query_engine/streaming.md
index 49d91f77ab..0ed385b8ae 100644
--- a/docs/how_to/customization/streaming.md
+++ b/docs/core_modules/query_modules/query_engine/streaming.md
@@ -51,17 +51,6 @@ Alternatively, if you just want to print the text as they arrive:
 streaming_response.print_response_stream() 
 ```
 
-
-```{toctree}
----
-caption: Examples
-maxdepth: 1
----
-
-../../examples/customization/streaming/SimpleIndexDemo-streaming.ipynb
-../../examples/customization/streaming/chat_engine_condense_question_stream_response.ipynb
-```
-
-
+See an [end-to-end example](/examples/customization/streaming/SimpleIndexDemo-streaming.ipynb)
 
 
diff --git a/docs/core_modules/query_modules/query_engine/supporting_modules.md b/docs/core_modules/query_modules/query_engine/supporting_modules.md
new file mode 100644
index 0000000000..8b2c65800d
--- /dev/null
+++ b/docs/core_modules/query_modules/query_engine/supporting_modules.md
@@ -0,0 +1,8 @@
+# Supporting Modules
+
+```{toctree}
+---
+maxdepth: 1
+---
+advanced/query_transformations.md
+```
\ No newline at end of file
diff --git a/docs/how_to/query_engine/usage_pattern.md b/docs/core_modules/query_modules/query_engine/usage_pattern.md
similarity index 70%
rename from docs/how_to/query_engine/usage_pattern.md
rename to docs/core_modules/query_modules/query_engine/usage_pattern.md
index 821c85e63a..d66e54f11c 100644
--- a/docs/how_to/query_engine/usage_pattern.md
+++ b/docs/core_modules/query_modules/query_engine/usage_pattern.md
@@ -6,7 +6,9 @@ Build a query engine from index:
 query_engine = index.as_query_engine()
 ```
 
-> Note: To learn how to build an index, see [Index](/how_to/index/root.md)
+```{tip}
+To learn how to build an index, see [Index](/core_modules/data_modules/index/root.md)
+```
 
 Ask a question over your data
 ```python
@@ -24,7 +26,7 @@ query_engine = index.as_query_engine(
 ```
 > Note: While the high-level API optimizes for ease-of-use, it does *NOT* expose full range of configurability.  
 
-See [**Response Modes**](/how_to/query_engine/response_modes.md) for a full list of response modes and what they do.
+See [**Response Modes**](./response_modes.md) for a full list of response modes and what they do.
 
 ```{toctree}
 ---
@@ -32,6 +34,7 @@ maxdepth: 1
 hidden:
 ---
 response_modes.md
+streaming.md
 ```
 
 
@@ -75,7 +78,19 @@ query_engine = RetrieverQueryEngine(
 response = query_engine.query("What did the author do growing up?")
 print(response)
 ```
+### Streaming
+To enable streaming, you simply need to pass in a `streaming=True` flag
+
+```python
+query_engine = index.as_query_engine(
+    streaming=True,
+)
+streaming_response = query_engine.query(
+    "What did the author do growing up?", 
+)
+streaming_response.print_response_stream() 
+```
+
+* Read the full [streaming guide](/core_modules/query_modules/query_engine/streaming.md)
+* See an [end-to-end example](/examples/customization/streaming/SimpleIndexDemo-streaming.ipynb)
 
-## Advanced Configurations
-You can further configure the query engine with [advanced components](/how_to/query_engine/advanced/root.md)
-to reduce token cost, improve retrieval quality, etc. 
\ No newline at end of file
diff --git a/docs/core_modules/query_modules/response_synthesizers/modules.md b/docs/core_modules/query_modules/response_synthesizers/modules.md
new file mode 100644
index 0000000000..6e0a9f65e3
--- /dev/null
+++ b/docs/core_modules/query_modules/response_synthesizers/modules.md
@@ -0,0 +1,62 @@
+# Module Guide
+
+Detailed inputs/outputs for each response synthesizer are found below. 
+
+## API Example
+
+The following shows the setup for utilizing all kwargs.
+
+- `response_mode` specifies which response synthesizer to use
+- `service_context` defines the LLM and related settings for synthesis
+- `text_qa_template` and `refine_template` are the prompts used at various stages
+- `use_async` is used for only the `tree_summarize` response mode right now, to asynchronously build the summary tree
+- `streaming` configures whether to return a streaming response object or not
+
+In the `synthesize`/`asynthesize` functions, you can optionally provide additional source nodes, which will be added to the `response.source_nodes` list.
+
+```python
+from llama_index.schema import Node, NodeWithScore
+from llama_index import get_response_synthesizer
+
+response_synthesizer = get_response_synthesizer(
+  response_mode="refine",
+  service_context=service_context,
+  text_qa_template=text_qa_template,
+  refine_template=refine_template,
+  use_async=False,
+  streaming=False
+)
+
+# synchronous
+response = response_synthesizer.synthesize(
+  "query string", 
+  nodes=[NodeWithScore(node=Node(text="text"), score=1.0), ..],
+  additional_source_nodes=[NodeWithScore(node=Node(text="text"), score=1.0), ..], 
+)
+
+# asynchronous
+response = await response_synthesizer.asynthesize(
+  "query string", 
+  nodes=[NodeWithScore(node=Node(text="text"), score=1.0), ..],
+  additional_source_nodes=[NodeWithScore(node=Node(text="text"), score=1.0), ..], 
+)
+```
+
+You can also directly return a string, using the lower-level `get_response` and `aget_response` functions
+
+```python
+response_str = response_synthesizer.get_response(
+  "query string", 
+  text_chunks=["text1", "text2", ...]
+)
+```
+
+## Example Notebooks
+
+```{toctree}
+---
+maxdepth: 1
+---
+/examples/response_synthesizers/refine.ipynb
+/examples/response_synthesizers/tree_summarize.ipynb
+```
diff --git a/docs/core_modules/query_modules/response_synthesizers/root.md b/docs/core_modules/query_modules/response_synthesizers/root.md
new file mode 100644
index 0000000000..919152f592
--- /dev/null
+++ b/docs/core_modules/query_modules/response_synthesizers/root.md
@@ -0,0 +1,50 @@
+# Response Synthesizer
+
+## Concept
+A `Response Synthesizer` is what generates a response from an LLM, using a user query and a given set of text chunks. The output of a response synthesizer is a `Response` object.
+
+The method for doing this can take many forms, from as simple as iterating over text chunks, to as complex as building a tree. The main idea here is to simplify the process of generating a response using an LLM across your data.
+
+When used in a query engine, the response synthesizer is used after nodes are retrieved from a retriever, and after any node-postprocessors are ran.
+
+```{tip}
+Confused about where response synthesizer fits in the pipeline? Read the [high-level concepts](/getting_started/concepts.md)
+```
+
+## Usage Pattern
+Use a response synthesizer on its own:
+
+```python
+from llama_index.schema import Node
+from llama_index.response_synthesizers import get_response_synthesizer
+
+response_synthesizer = get_response_synthesizer(response_mode='compact')
+
+response = response_synthesizer.synthesize("query text", nodes=[Node(text="text"), ...])
+```
+
+Or in a query engine after you've created an index:
+
+```python
+query_engine = index.as_query_engine(response_synthesizer=response_synthesizer)
+response = query_engine.query("query_text")
+```
+
+You can find more details on all available response synthesizers, modes, and how to build your own below.
+
+```{toctree}
+---
+maxdepth: 2
+---
+usage_pattern.md
+```
+
+## Modules
+Below you can find detailed API information for each response synthesis module.
+
+```{toctree}
+---
+maxdepth: 1
+---
+modules.md
+```
\ No newline at end of file
diff --git a/docs/core_modules/query_modules/response_synthesizers/usage_pattern.md b/docs/core_modules/query_modules/response_synthesizers/usage_pattern.md
new file mode 100644
index 0000000000..9a6ae8fa66
--- /dev/null
+++ b/docs/core_modules/query_modules/response_synthesizers/usage_pattern.md
@@ -0,0 +1,95 @@
+# Usage Pattern
+
+## Get Started
+
+Configuring the response synthesizer for a query engine using `response_mode`:
+
+```python
+from llama_index.schema import Node, NodeWithScore
+from llama_index.response_synthesizers import get_response_synthesizer
+
+response_synthesizer = get_response_synthesizer(response_mode='compact')
+
+response = response_synthesizer.synthesize(
+  "query text", 
+  nodes=[NodeWithScore(node=Node(text="text"), score=1.0), ..]
+)
+```
+
+Or, more commonly, in a query engine after you've created an index:
+
+```python
+query_engine = index.as_query_engine(response_synthesizer=response_synthesizer)
+response = query_engine.query("query_text")
+```
+
+```{tip}
+To learn how to build an index, see [Index](/core_modules/data_modules/index/root.md)
+```
+
+## Configuring the Response Mode
+Response synthesizers are typically specified through a `response_mode` kwarg setting.
+
+Several response synthesizers are implemented already in LlamaIndex:
+
+- `refine`: "create and refine" an answer by sequentially going through each retrieved text chunk. 
+    This makes a separate LLM call per Node. Good for more detailed answers.
+- `compact` (default): "compact" the prompt during each LLM call by stuffing as 
+    many text chunks that can fit within the maximum prompt size. If there are 
+    too many chunks to stuff in one prompt, "create and refine" an answer by going through
+    multiple compact prompts. The same as `refine`, but should result in less LLM calls.
+- `tree_summarize`: Given a set of text chunks and the query, recursively construct a tree 
+    and return the root node as the response. Good for summarization purposes.
+- `simple_summarize`: Truncates all text chunks to fit into a single LLM prompt. Good for quick
+    summarization purposes, but may lose detail due to truncation.
+- `no_text`: Only runs the retriever to fetch the nodes that would have been sent to the LLM, 
+    without actually sending them. Then can be inspected by checking `response.source_nodes`.
+- `accumulate`: Given a set of text chunks and the query, apply the query to each text
+    chunk while accumulating the responses into an array. Returns a concatenated string of all
+    responses. Good for when you need to run the same query separately against each text
+    chunk.
+- `compact_accumulate`: The same as accumulate, but will "compact" each LLM prompt similar to
+    `compact`, and run the same query against each text chunk.
+
+## Custom Response Synthesizers
+
+Each response synthesizer inherits from `llama_index.response_synthesizers.base.BaseSynthesizer`. The base API is extremely simple, which makes it easy to create your own response synthesizer.
+
+Maybe you want to customize which template is used at each step in `tree_summarize`, or maybe a new research paper came out detailing a new way to generate a response to a query, you can create your own response synthesizer and plug it into any query engine or use it on its own.
+
+Below we show the `__init__()` function, as well as the two abstract methods that every response synthesizer must implement. The basic requirements are to process a query and text chunks, and return a string (or string generator) response.
+
+```python
+class BaseSynthesizer(ABC):
+    """Response builder class."""
+
+    def __init__(
+        self,
+        service_context: Optional[ServiceContext] = None,
+        streaming: bool = False,
+    ) -> None:
+        """Init params."""
+        self._service_context = service_context or ServiceContext.from_defaults()
+        self._callback_manager = self._service_context.callback_manager
+        self._streaming = streaming
+
+    @abstractmethod
+    def get_response(
+        self,
+        query_str: str,
+        text_chunks: Sequence[str],
+        **response_kwargs: Any,
+    ) -> RESPONSE_TEXT_TYPE:
+        """Get response."""
+        ...
+
+    @abstractmethod
+    async def aget_response(
+        self,
+        query_str: str,
+        text_chunks: Sequence[str],
+        **response_kwargs: Any,
+    ) -> RESPONSE_TEXT_TYPE:
+        """Get response."""
+        ...
+```
diff --git a/docs/how_to/retriever/modules.md b/docs/core_modules/query_modules/retriever/modules.md
similarity index 91%
rename from docs/how_to/retriever/modules.md
rename to docs/core_modules/query_modules/retriever/modules.md
index 2ab16c6bb3..03ff29d90d 100644
--- a/docs/how_to/retriever/modules.md
+++ b/docs/core_modules/query_modules/retriever/modules.md
@@ -1,6 +1,6 @@
 # Module Guides
 We are adding more module guides soon!
-In the meanwhile, please take a look at the [API References](/reference/query/retrievers.rst).
+In the meanwhile, please take a look at the [API References](/api_reference/query/retrievers.rst).
 
 ## Vector Index Retrievers
 * VectorIndexRetriever
diff --git a/docs/how_to/retriever/retriever_modes.md b/docs/core_modules/query_modules/retriever/retriever_modes.md
similarity index 94%
rename from docs/how_to/retriever/retriever_modes.md
rename to docs/core_modules/query_modules/retriever/retriever_modes.md
index 1539a2eb86..67f9c79a88 100644
--- a/docs/how_to/retriever/retriever_modes.md
+++ b/docs/core_modules/query_modules/retriever/retriever_modes.md
@@ -32,4 +32,4 @@ Specifying `retriever_mode` has no effect (silently ignored).
 
 ## Document Summary Index
 * `default`: DocumentSummaryIndexRetriever
-* `embedding`: DocumentSummaryIndexEmbeddingRetriever
\ No newline at end of file
+* `embedding`: DocumentSummaryIndexEmbeddingRetriever
\ No newline at end of file
diff --git a/docs/core_modules/query_modules/retriever/root.md b/docs/core_modules/query_modules/retriever/root.md
new file mode 100644
index 0000000000..922638c6d1
--- /dev/null
+++ b/docs/core_modules/query_modules/retriever/root.md
@@ -0,0 +1,37 @@
+
+# Retriever
+
+## Concept
+
+Retrievers are responsible for fetching the most relevant context given a user query (or chat message).  
+
+It can be built on top of [Indices](/core_modules/data_modules/index/root.md), but can also be defined independently.
+It is used as a key building block in [Query Engines](/core_modules/query_modules/query_engine/root.md) (and [Chat Engines](/core_modules/query_modules/chat_engines/root.md)) for retrieving relevant context.
+
+```{tip}
+Confused about where retriever fits in the pipeline? Read about [high-level concepts](/getting_started/concepts.md)
+```
+
+## Usage Pattern
+
+Get started with:
+```python
+retriever = index.as_retriever()
+nodes = retriever.retrieve("Who is Paul Graham?")
+```
+
+```{toctree}
+---
+maxdepth: 2
+---
+usage_pattern.md
+```
+
+
+## Modules
+```{toctree}
+---
+maxdepth: 2
+---
+modules.md
+```
\ No newline at end of file
diff --git a/docs/how_to/retriever/usage_pattern.md b/docs/core_modules/query_modules/retriever/usage_pattern.md
similarity index 81%
rename from docs/how_to/retriever/usage_pattern.md
rename to docs/core_modules/query_modules/retriever/usage_pattern.md
index fcfb1df208..77ddcb688d 100644
--- a/docs/how_to/retriever/usage_pattern.md
+++ b/docs/core_modules/query_modules/retriever/usage_pattern.md
@@ -1,6 +1,5 @@
 # Usage Pattern
 
-
 ## Get Started
 Get a retriever from index:
 ```python
@@ -12,9 +11,10 @@ Retrieve relevant context for a question:
 nodes = retriever.retrieve('Who is Paul Graham?')
 ```
 
-> Note: To learn how to build an index, see [Index](/how_to/index/root.md)
+> Note: To learn how to build an index, see [Index](/core_modules/data_modules/index/root.md)
 
 ## High-Level API
+
 ### Selecting a Retriever
 
 You can select the index-specific retriever class via `retriever_mode`. 
@@ -24,9 +24,9 @@ retriever = list_index.as_retriever(
     retriever_mode='llm',
 )
 ```
-This creates a [ListIndexLLMRetriever](/reference/query/retrievers/list.rst) on top of the list index.
+This creates a [ListIndexLLMRetriever](/api_reference/query/retrievers/list.rst) on top of the list index.
 
-See [**Retriever Modes**](/how_to/retriever/retriever_modes.md) for a full list of (index-specific) retriever modes
+See [**Retriever Modes**](/core_modules/query_modules/retriever/retriever_modes.md) for a full list of (index-specific) retriever modes
 and the retriever classes they map to.
 
 ```{toctree}
diff --git a/docs/how_to/structured_outputs/output_parser.md b/docs/core_modules/query_modules/structured_outputs/output_parser.md
similarity index 93%
rename from docs/how_to/structured_outputs/output_parser.md
rename to docs/core_modules/query_modules/structured_outputs/output_parser.md
index 49db675fef..c5fcc15e7d 100644
--- a/docs/how_to/structured_outputs/output_parser.md
+++ b/docs/core_modules/query_modules/structured_outputs/output_parser.md
@@ -70,7 +70,7 @@ query_engine = index.as_query_engine(
     service_context=ServiceContext.from_defaults(
         llm_predictor=llm_predictor
     ),
-    text_qa_temjlate=qa_prompt, 
+    text_qa_template=qa_prompt, 
     refine_template=refine_prompt, 
 )
 response = query_engine.query(
@@ -148,10 +148,9 @@ caption: Examples
 maxdepth: 1
 ---
 
-../examples/output_parsing/GuardrailsDemo.ipynb
-../examples/output_parsing/LangchainOutputParserDemo.ipynb
-../examples/output_parsing/guidance_pydantic_program.ipynb
-../examples/output_parsing/guidance_sub_question.ipynb
-../examples/output_parsing/openai_pydantic_program.ipynb
-../examples/output_parsing/df_output_parser.ipynb
+/examples/output_parsing/GuardrailsDemo.ipynb
+/examples/output_parsing/LangchainOutputParserDemo.ipynb
+/examples/output_parsing/guidance_pydantic_program.ipynb
+/examples/output_parsing/guidance_sub_question.ipynb
+/examples/output_parsing/openai_pydantic_program.ipynb
 ```
\ No newline at end of file
diff --git a/docs/how_to/structured_outputs/pydantic_program.md b/docs/core_modules/query_modules/structured_outputs/pydantic_program.md
similarity index 100%
rename from docs/how_to/structured_outputs/pydantic_program.md
rename to docs/core_modules/query_modules/structured_outputs/pydantic_program.md
diff --git a/docs/how_to/structured_outputs/root.md b/docs/core_modules/query_modules/structured_outputs/root.md
similarity index 93%
rename from docs/how_to/structured_outputs/root.md
rename to docs/core_modules/query_modules/structured_outputs/root.md
index 8ab6d5365f..a32026b5e7 100644
--- a/docs/how_to/structured_outputs/root.md
+++ b/docs/core_modules/query_modules/structured_outputs/root.md
@@ -1,6 +1,6 @@
-# 🔢 Structured Outputs
+# Structured Outputs
 
- The ability of LLMs to produce structured outputs are important for downstream applications that rely on reliably parsing output values. 
+The ability of LLMs to produce structured outputs are important for downstream applications that rely on reliably parsing output values. 
 LlamaIndex itself also relies on structured output in the following ways.
 - **Document retrieval**: Many data structures within LlamaIndex rely on LLM calls with a specific schema for Document retrieval. For instance, the tree index expects LLM calls to be in the format "ANSWER: (number)".
 - **Response synthesis**: Users may expect that the final response contains some degree of structure (e.g. a JSON output, a formatted SQL query, etc.)
diff --git a/docs/core_modules/supporting_modules/callbacks/root.md b/docs/core_modules/supporting_modules/callbacks/root.md
new file mode 100644
index 0000000000..c7130dd57e
--- /dev/null
+++ b/docs/core_modules/supporting_modules/callbacks/root.md
@@ -0,0 +1,50 @@
+# Callbacks
+
+## Concept
+LlamaIndex provides callbacks to help debug, track, and trace the inner workings of the library. 
+Using the callback manager, as many callbacks as needed can be added.
+
+In addition to logging data related to events, you can also track the duration and number of occurrences
+of each event. 
+
+Furthermore, a trace map of events is also recorded, and callbacks can use this data
+however they want. For example, the `LlamaDebugHandler` will, by default, print the trace of events
+after most operations.
+
+**Callback Event Types**  
+While each callback may not leverage each event type, the following events are available to be tracked:
+
+- `CHUNKING` -> Logs for the before and after of text splitting.
+- `NODE_PARSING` -> Logs for the documents and the nodes that they are parsed into.
+- `EMBEDDING` -> Logs for the number of texts embedded.
+- `LLM` -> Logs for the template and response of LLM calls.
+- `QUERY` -> Keeps track of the start and end of each query.
+- `RETRIEVE` -> Logs for the nodes retrieved for a query.
+- `SYNTHESIZE` -> Logs for the result for synthesize calls.
+- `TREE` -> Logs for the summary and level of summaries generated.
+- `SUB_QUESTIONS` -> Logs for the sub questions and answers generated.
+
+You can implement your own callback to track and trace these events, or use an existing callback.
+
+
+## Modules
+
+Currently supported callbacks are as follows:
+
+- [TokenCountingHandler](/examples/callbacks/TokenCountingHandler.ipynb) -> Flexible token counting for prompt, completion, and embedding token usage. See the migration details [here](/core_modules/supporting_modules/callbacks/token_counting_migration.md)
+- [LlamaDebugHandler](/examples/callbacks/LlamaDebugHandler.ipynb) -> Basic tracking and tracing for events. Example usage can be found in the notebook below.
+- [WandbCallbackHandler](/examples/callbacks/WandbCallbackHandler.ipynb) -> Tracking of events and traces using the Wandb Prompts frontend. More details are in the notebook below or at [Wandb](https://docs.wandb.ai/guides/prompts/quickstart)
+- [AimCallback](/examples/callbacks/AimCallback.ipynb) -> Tracking of LLM inputs and outputs. Example usage can be found in the notebook below.
+
+
+```{toctree}
+---
+maxdepth: 1
+hidden:
+---
+/examples/callbacks/TokenCountingHandler.ipynb
+/examples/callbacks/LlamaDebugHandler.ipynb
+/examples/callbacks/WandbCallbackHandler.ipynb
+/examples/callbacks/AimCallback.ipynb
+token_counting_migration.md
+```
\ No newline at end of file
diff --git a/docs/how_to/callbacks/token_counting_migration.md b/docs/core_modules/supporting_modules/callbacks/token_counting_migration.md
similarity index 96%
rename from docs/how_to/callbacks/token_counting_migration.md
rename to docs/core_modules/supporting_modules/callbacks/token_counting_migration.md
index bb54aba473..c92e40ce73 100644
--- a/docs/how_to/callbacks/token_counting_migration.md
+++ b/docs/core_modules/supporting_modules/callbacks/token_counting_migration.md
@@ -30,7 +30,7 @@ service_context = ServiceContext.from_defaults(callback_manager=callback_manager
 document = SimpleDirectoryReader("./data").load_data()
 
 # if verbose is turned on, you will see embedding token usage printed
-index = VectorStoreIndex.from_documents(documents)
+index = VectorStoreIndex.from_documents(documents, service_context=service_context)
 
 # otherwise, you can access the count directly
 print(token_counter.total_embedding_token_count)
diff --git a/docs/how_to/analysis/cost_analysis.md b/docs/core_modules/supporting_modules/cost_analysis/root.md
similarity index 56%
rename from docs/how_to/analysis/cost_analysis.md
rename to docs/core_modules/supporting_modules/cost_analysis/root.md
index b00db09d1d..f1dedeabed 100644
--- a/docs/how_to/analysis/cost_analysis.md
+++ b/docs/core_modules/supporting_modules/cost_analysis/root.md
@@ -1,6 +1,7 @@
 # Cost Analysis
 
-Each call to an LLM will cost some amount of money - for instance, OpenAI's Davinci costs $0.02 / 1k tokens. The cost of building an index and querying depends on 
+## Concept
+Each call to an LLM will cost some amount of money - for instance, OpenAI's gpt-3.5-turbo costs $0.002 / 1k tokens. The cost of building an index and querying depends on 
 
 - the type of LLM used
 - the type of data structure used
@@ -25,7 +26,6 @@ The following indices do require LLM calls during build time:
 - `TreeIndex` - use LLM to hierarchically summarize the text to build the tree
 - `KeywordTableIndex` - use LLM to extract keywords from each document
 
-
 ### Query Time
 
 There will always be >= 1 LLM call during query time, in order to synthesize the final answer. 
@@ -39,90 +39,59 @@ Here are some notes regarding each of the indices:
     - Setting `child_branch_factor=2` will be more expensive than the default `child_branch_factor=1` (polynomial vs logarithmic), because we traverse 2 children instead of just 1 for each parent node.
 - `KeywordTableIndex`: by default requires an LLM call to extract query keywords.
     - Can do `index.as_retriever(retriever_mode="simple")` or `index.as_retriever(retriever_mode="rake")` to also use regex/RAKE keyword extractors on your query text.
+-  `VectorStoreIndex`: by default, requires one LLM call per query. If you increase the `similarity_top_k` or `chunk_size`, or change the `response_mode`, then this number will increase.
 
-
-### Token Predictor Usage
+## Usage Pattern
 
 LlamaIndex offers token **predictors** to predict token usage of LLM and embedding calls.
 This allows you to estimate your costs during 1) index construction, and 2) index querying, before
 any respective LLM calls are made.
 
-#### Using MockLLMPredictor
-
-To predict token usage of LLM calls, import and instantiate the MockLLMPredictor with the following:
-```python
-from llama_index import MockLLMPredictor, ServiceContext
+Tokens are counted using the `TokenCountingHandler` callback. See the [example notebook](../../../examples/callbacks/TokenCountingHandler.ipynb) for details on the setup.
 
-llm_predictor = MockLLMPredictor(max_tokens=256)
-```
+### Using MockLLM
 
-You can then use this predictor during both index construction and querying. Examples are given below.
+To predict token usage of LLM calls, import and instantiate the MockLLM as shown below. The `max_tokens` parameter is used as a "worst case" prediction, where each LLM response will contain exactly that number of tokens. If `max_tokens` is not specified, then it will simply predict back the prompt.
 
-**Index Construction**
 ```python
-from llama_index import TreeIndex, MockLLMPredictor, SimpleDirectoryReader
-
-documents = SimpleDirectoryReader('../paul_graham_essay/data').load_data()
-# the "mock" llm predictor is our token counter
-llm_predictor = MockLLMPredictor(max_tokens=256)
-service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
-# pass the "mock" llm_predictor into TreeIndex during index construction
-index = TreeIndex.from_documents(documents, service_context=service_context)
+from llama_index import ServiceContext, set_global_service_context
+from llama_index.llms import MockLLM
 
-# get number of tokens used
-print(llm_predictor.last_token_usage)
-```
+llm = MockLLM(max_tokens=256)
 
-**Index Querying**
+service_context = ServiceContext.from_defaults(llm=llm)
 
-```python
-query_engine = index.as_query_engine(
-    service_context=service_context
-)
-response = query_engine.query("What did the author do growing up?")
-
-# get number of tokens used
-print(llm_predictor.last_token_usage)
+# optionally set a global service context
+set_global_service_context(service_context)
 ```
 
-#### Using MockEmbedding
+You can then use this predictor during both index construction and querying. 
+
+### Using MockEmbedding
 
 You may also predict the token usage of embedding calls with `MockEmbedding`. 
-You can use it in tandem with `MockLLMPredictor`.
 
 ```python
-from llama_index import (
-    VectorStoreIndex, 
-    MockLLMPredictor, 
-    MockEmbedding, 
-    SimpleDirectoryReader,
-    ServiceContext
-)
-
-documents = SimpleDirectoryReader('../paul_graham_essay/data').load_data()
-index = VectorStoreIndex.from_documents(documents)
-
-# specify both a MockLLMPredictor as wel as MockEmbedding
-llm_predictor = MockLLMPredictor(max_tokens=256)
+from llama_index import ServiceContext, set_global_service_context
+from llama_index import MockEmbedding
+
+# specify a MockEmbedding
 embed_model = MockEmbedding(embed_dim=1536)
-service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, embed_model=embed_model)
-
-query_engine = index.as_query_engine(
-    service_context=service_context
-)
-response = query_engine.query(
-    "What did the author do after his time at Y Combinator?",
-)
-```
 
+service_context = ServiceContext.from_defaults(embed_model=embed_model)
 
-[Here is an example notebook](https://github.com/jerryjliu/llama_index/blob/main/docs/examples/analysis/TokenPredictor.ipynb).  
+# optionally set a global service context
+set_global_service_context(service_context)
+```
+
+## Usage Pattern
 
+Read about the full usage pattern below!
 
 ```{toctree}
 ---
 caption: Examples
 maxdepth: 1
 ---
-../../examples/analysis/TokenPredictor.ipynb
-```
\ No newline at end of file
+usage_pattern.md
+```
diff --git a/docs/core_modules/supporting_modules/cost_analysis/usage_pattern.md b/docs/core_modules/supporting_modules/cost_analysis/usage_pattern.md
new file mode 100644
index 0000000000..145c0d35c4
--- /dev/null
+++ b/docs/core_modules/supporting_modules/cost_analysis/usage_pattern.md
@@ -0,0 +1,97 @@
+# Usage Pattern
+
+## Estimating LLM and Embedding Token Counts
+
+In order to measure LLM and Embedding token counts, you'll need to
+
+1. Setup `MockLLM` and `MockEmbedding` objects
+
+```python
+from llama_index.llms import MockLLM
+from llama_index import MockEmbedding
+
+llm = MockLLM(max_tokens=256)
+embed_model = MockEmbedding(embed_dim=1536)
+```
+
+2. Setup the `TokenCountingHandler` callback
+
+```python
+import tiktoken
+from llama_index.callbacks import CallbackManager, TokenCountingHandler
+
+token_counter = TokenCountingHandler(
+    tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode
+)
+
+callback_manager = CallbackManager([token_counter])
+```
+
+3. Add them to the global `ServiceContext`
+
+```python
+from llama_index import ServiceContext, set_global_service_context
+
+set_global_service_context(
+    ServiceContext.from_defaults(
+        llm=llm, 
+        embed_model=embed_model, 
+        callback_manager=callback_manager
+    )
+)
+```
+
+4. Construct an Index 
+
+```python
+from llama_index import VectorStoreIndex, SimpleDirectoryReader
+
+documents = SimpleDirectoryReader("./docs/examples/data/paul_graham").load_data()
+
+index = VectorStoreIndex.from_documents(documents)
+```
+
+5. Measure the counts!
+
+```python
+print(
+    "Embedding Tokens: ",
+    token_counter.total_embedding_token_count,
+    "\n",
+    "LLM Prompt Tokens: ",
+    token_counter.prompt_llm_token_count,
+    "\n",
+    "LLM Completion Tokens: ",
+    token_counter.completion_llm_token_count,
+    "\n",
+    "Total LLM Token Count: ",
+    token_counter.total_llm_token_count,
+    "\n",
+)
+
+# reset counts
+token_counter.reset_counts()
+```
+
+6. Run a query, measure again
+
+```python
+query_engine = index.as_query_engine()
+
+response = query_engine.query("query")
+
+print(
+    "Embedding Tokens: ",
+    token_counter.total_embedding_token_count,
+    "\n",
+    "LLM Prompt Tokens: ",
+    token_counter.prompt_llm_token_count,
+    "\n",
+    "LLM Completion Tokens: ",
+    token_counter.completion_llm_token_count,
+    "\n",
+    "Total LLM Token Count: ",
+    token_counter.total_llm_token_count,
+    "\n",
+)
+```
diff --git a/docs/core_modules/supporting_modules/evaluation/modules.md b/docs/core_modules/supporting_modules/evaluation/modules.md
new file mode 100644
index 0000000000..5af2663269
--- /dev/null
+++ b/docs/core_modules/supporting_modules/evaluation/modules.md
@@ -0,0 +1,13 @@
+# Modules
+
+Notebooks with usage of these components can be found below.
+
+```{toctree}
+---
+maxdepth: 1
+---
+
+../../../examples/evaluation/TestNYC-Evaluation.ipynb
+../../../examples/evaluation/TestNYC-Evaluation-Query.ipynb
+../../../examples/evaluation/QuestionGeneration.ipynb
+```
\ No newline at end of file
diff --git a/docs/core_modules/supporting_modules/evaluation/root.md b/docs/core_modules/supporting_modules/evaluation/root.md
new file mode 100644
index 0000000000..1d77d2c9b4
--- /dev/null
+++ b/docs/core_modules/supporting_modules/evaluation/root.md
@@ -0,0 +1,64 @@
+# Evaluation
+
+## Concept
+Evaluation in generative AI and retrieval is a difficult task. Due to the unpredictable nature of text, and a general lack of "expected" outcomes to compare against, there are many blockers to getting started with evaluation.
+
+However, LlamaIndex offers a few key modules for evaluating the quality of both Document retrieval and response synthesis.
+Here are some key questions for each component:
+
+- **Document retrieval**: Are the sources relevant to the query?
+- **Response synthesis**: Does the response match the retrieved context? Does it also match the query? 
+
+This guide describes how the evaluation components within LlamaIndex work. Note that our current evaluation modules
+do *not* require ground-truth labels. Evaluation can be done with some combination of the query, context, response,
+and combine these with LLM calls.
+
+### Evaluation of the Response + Context
+
+Each response from a `query_engine.query` calls returns both the synthesized response as well as source documents.
+
+We can evaluate the response against the retrieved sources - without taking into account the query!
+
+This allows you to measure hallucination - if the response does not match the retrieved sources, this means that the model may be "hallucinating" an answer since it is not rooting the answer in the context provided to it in the prompt.
+
+There are two sub-modes of evaluation here. We can either get a binary response "YES"/"NO" on whether response matches *any* source context,
+and also get a response list across sources to see which sources match.
+
+The `ResponseEvaluator` handles both modes for evaluating in this context.
+
+### Evaluation of the Query + Response + Source Context
+
+This is similar to the above section, except now we also take into account the query. The goal is to determine if
+the response + source context answers the query.
+
+As with the above, there are two submodes of evaluation. 
+- We can either get a binary response "YES"/"NO" on whether
+the response matches the query, and whether any source node also matches the query.
+- We can also ignore the synthesized response, and check every source node to see
+if it matches the query.
+
+### Question Generation
+
+In addition to evaluating queries, LlamaIndex can also use your data to generate questions to evaluate on. This means that you can automatically generate questions, and then run an evaluation pipeline to test if the LLM can actually answer questions accurately using your data.
+
+## Usage Pattern
+
+For full usage details, see the usage pattern below.
+
+```{toctree}
+---
+maxdepth: 1
+---
+usage_pattern.md
+```
+
+## Modules
+
+Notebooks with usage of these components can be found below.
+
+```{toctree}
+---
+maxdepth: 1
+---
+modules.md
+```
\ No newline at end of file
diff --git a/docs/how_to/evaluation/evaluation.md b/docs/core_modules/supporting_modules/evaluation/usage_pattern.md
similarity index 55%
rename from docs/how_to/evaluation/evaluation.md
rename to docs/core_modules/supporting_modules/evaluation/usage_pattern.md
index 09bbd4f3cb..01aa86121b 100644
--- a/docs/how_to/evaluation/evaluation.md
+++ b/docs/core_modules/supporting_modules/evaluation/usage_pattern.md
@@ -1,25 +1,6 @@
-# 🔬 Evaluation
+# Usage Pattern
 
-LlamaIndex offers a few key modules for evaluating the quality of both Document retrieval and response synthesis.
-Here are some key questions for each component:
-- **Document retrieval**: Are the sources relevant to the query?
-- **Response synthesis**: Does the response match the retrieved context? Does it also match the query? 
-
-This guide describes how the evaluation components within LlamaIndex work. Note that our current evaluation modules
-do *not* require ground-truth labels. Evaluation can be done with some combination of the query, context, response,
-and combine these with LLM calls.
-
-## Evaluation of the Response + Context
-
-Each response from an `query_engine.query` calls returns both the synthesized response as well as source documents.
-
-We can evaluate the response against the retrieved sources - without taking into account the query!
-
-This allows you to measure hallucination - if the response does not match the retrieved sources, this means that the model may be "hallucinating" an answer
-since it is not rooting the answer in the context provided to it in the prompt.
-
-There are two sub-modes of evaluation here. We can either get a binary response "YES"/"NO" on whether response matches *any* source context,
-and also get a response list across sources to see which sources match.
+## Evaluating Response for Hallucination
 
 ### Binary Evaluation
 
@@ -27,11 +8,12 @@ This mode of evaluation will return "YES"/"NO" if the synthesized response match
 
 ```python
 from llama_index import VectorStoreIndex
+from llama_index.llms import OpenAI
 from llama_index.evaluation import ResponseEvaluator
 
 # build service context
-llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-4"))
-service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
+llm = OpenAI(model="gpt-4", temperature=0.0)
+service_context = ServiceContext.from_defaults(llm=llm)
 
 # build index
 ...
@@ -49,13 +31,8 @@ print(str(eval_result))
 
 You'll get back either a `YES` or `NO` response.
 
-#### Diagram
-
 ![](/_static/evaluation/eval_response_context.png)
 
-
-
-
 ### Sources Evaluation
 
 This mode of evaluation will return "YES"/"NO" for every source node.
@@ -84,21 +61,7 @@ print(str(eval_result))
 
 You'll get back a list of "YES"/"NO", corresponding to each source node in `response.source_nodes`.
 
-### Notebook
-
-Take a look at this [notebook](https://github.com/jerryjliu/llama_index/blob/main/examples/evaluation/TestNYC-Evaluation.ipynb).
-
-
-## Evaluation of the Query + Response + Source Context
-
-This is similar to the above section, except now we also take into account the query. The goal is to determine if
-the response + source context answers the query.
-
-As with the above, there are two submodes of evaluation. 
-- We can either get a binary response "YES"/"NO" on whether
-the response matches the query, and whether any source node also matches the query.
-- We can also ignore the synthesized response, and check every source node to see
-if it matches the query.
+## Evaluating Query + Response for Answer Quality
 
 ### Binary Evaluation
 
@@ -106,11 +69,12 @@ This mode of evaluation will return "YES"/"NO" if the synthesized response match
 
 ```python
 from llama_index import VectorStoreIndex
+from llama_index.llms import OpenAI
 from llama_index.evaluation import QueryResponseEvaluator
 
 # build service context
-llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-4"))
-service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
+llm = OpenAI(model="gpt-4", temperature=0.0)
+service_context = ServiceContext.from_defaults(llm=llm)
 
 # build index
 ...
@@ -126,11 +90,8 @@ print(str(eval_result))
 
 ```
 
-#### Diagram
-
 ![](/_static/evaluation/eval_query_response_context.png)
 
-
 ### Sources Evaluation
 
 This mode of evaluation will look at each source node, and see if each source node contains an answer to the query.
@@ -154,25 +115,27 @@ query_engine = vector_index.as_query_engine()
 response = query_engine.query("What battles took place in New York City in the American Revolution?")
 eval_result = evaluator.evaluate_source_nodes(response)
 print(str(eval_result))
-
 ```
 
-#### Diagram
-
 ![](/_static/evaluation/eval_query_sources.png)
 
-### Notebook
+## Question Generation
 
-Take a look at this [notebook](https://github.com/jerryjliu/llama_index/blob/main/examples/evaluation/TestNYC-Evaluation-Query.ipynb).
+LlamaIndex can also generate questions to answer using your data. Using in combination with the above evaluators, you can create a fully automated evaluation pipeline over your data.
 
+```python
+from llama_index import SimpleDirectoryReader
+from llama_index.evaluation import DatasetGenerator
 
-```{toctree}
----
-caption: Examples
-maxdepth: 1
----
+# build service context
+llm = OpenAI(model="gpt-4", temperature=0.0)
+service_context = ServiceContext.from_defaults(llm=llm)
 
-../../examples/evaluation/TestNYC-Evaluation.ipynb
-../../examples/evaluation/TestNYC-Evaluation-Query.ipynb
-../../examples/evaluation/QuestionGeneration.ipynb
-```
\ No newline at end of file
+# build documents
+documents = SimpleDirectoryReader("./data").load_data()
+
+# define generator, generate questions
+data_generator = DatasetGenerator.from_documents(documents)
+
+eval_questions = data_generator.generate_questions_from_nodes()
+```
diff --git a/docs/how_to/analysis/playground.md b/docs/core_modules/supporting_modules/playground/root.md
similarity index 80%
rename from docs/how_to/analysis/playground.md
rename to docs/core_modules/supporting_modules/playground/root.md
index f5d851a93e..7ca4c563f0 100644
--- a/docs/how_to/analysis/playground.md
+++ b/docs/core_modules/supporting_modules/playground/root.md
@@ -1,12 +1,14 @@
 # Playground
 
+## Concept
+
 The Playground module in LlamaIndex is a way to automatically test your data (i.e. documents) across a diverse combination of indices, models, embeddings, modes, etc. to decide which ones are best for your purposes. More options will continue to be added.
 
 For each combination, you'll be able to compare the results for any query and compare the answers, latency, tokens used, and so on.
 
 You may initialize a Playground with a list of pre-built indices, or initialize one from a list of Documents using the preset indices.
 
-### Sample Code
+## Usage Pattern
 
 A sample usage is given below.
 
@@ -32,20 +34,11 @@ playground.compare("What is the population of Berlin?")
 
 ```
 
-### API Reference
-
-[API Reference here](/reference/playground.rst)
-
-
-### Example Notebook
-
-[Link to Example Notebook](https://github.com/jerryjliu/llama_index/blob/main/docs/examples/analysis/PlaygroundDemo.ipynb).  
-
+## Modules
 
 ```{toctree}
 ---
-caption: Examples
 maxdepth: 1
 ---
-../../examples/analysis/PlaygroundDemo.ipynb
+../../../examples/analysis/PlaygroundDemo.ipynb
 ```
\ No newline at end of file
diff --git a/docs/core_modules/supporting_modules/service_context.md b/docs/core_modules/supporting_modules/service_context.md
new file mode 100644
index 0000000000..a93e3cde7f
--- /dev/null
+++ b/docs/core_modules/supporting_modules/service_context.md
@@ -0,0 +1,103 @@
+# ServiceContext
+
+## Concept
+The `ServiceContext` is a bundle of commonly used resources used during the indexing and querying stage in a LlamaIndex pipeline/application.
+You can use it to set the [global configuration](#setting-global-configuration), as well as [local configurations](#setting-local-configuration) at specific parts of the pipeline.
+
+## Usage Pattern
+
+### Configuring the service context
+The `ServiceContext` is a simple python dataclass that you can directly construct by passing in the desired components.
+
+```python
+@dataclass
+class ServiceContext:
+    # The LLM used to generate natural language responses to queries.
+    llm_predictor: BaseLLMPredictor
+
+    # The PromptHelper object that helps with truncating and repacking text chunks to fit in the LLM's context window.
+    prompt_helper: PromptHelper
+
+    # The embedding model used to generate vector representations of text.
+    embed_model: BaseEmbedding
+
+    # The parser that converts documents into nodes.
+    node_parser: NodeParser
+
+    # The callback manager object that calls it's handlers on events. Provides basic logging and tracing capabilities.
+    callback_manager: CallbackManager
+
+    @classmethod
+    def from_defaults(cls, ...) -> "ServiceContext":
+      ... 
+```
+
+```{tip}
+Learn how to configure specific modules:
+- [LLM](/core_modules/model_modules/llms/usage_custom.md)
+- [Embedding Model](/core_modules/model_modules/embeddings/usage_pattern.md)
+- [Node Parser](/core_modules/data_modules/node_parsers/usage_pattern.md)
+
+```
+
+We also expose some common kwargs (of the above components) via the `ServiceContext.from_defaults` method
+for convenience (so you don't have to manually construct them).
+ 
+**Kwargs for node parser**:
+- `chunk_size`: The size of the text chunk for a node . Is used for the node parser when they aren't provided.
+- `chunk_overlap`: The amount of overlap between nodes (i.e. text chunks).
+
+**Kwargs for prompt helper**:
+- `context_window`: The size of the context window of the LLM. Typically we set this 
+  automatically with the model metadata. But we also allow explicit override via this parameter
+  for additional control (or in case the default is not available for certain latest
+  models)
+- `num_output`: The number of maximum output from the LLM. Typically we set this
+  automatically given the model metadata. This parameter does not actually limit the model
+  output, it affects the amount of "space" we save for the output, when computing 
+  available context window size for packing text from retrieved Nodes.
+
+Here's a complete example that sets up all objects using their default settings:
+
+```python
+from llama_index import ServiceContext, LLMPredictor, OpenAIEmbedding, PromptHelper
+from llama_index.llms import OpenAI
+from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
+from llama_index.node_parser import SimpleNodeParser
+
+llm = OpenAI(model='text-davinci-003', temperature=0, max_tokens=256)
+embed_model = OpenAIEmbedding()
+node_parser = SimpleNodeParser(
+  text_splitter=TokenTextSplitter(chunk_size=1024, chunk_overlap=20)
+)
+prompt_helper = PromptHelper(
+  context_window=4096, 
+  num_output=256, 
+  chunk_overlap_ratio=0.1, 
+  chunk_size_limit=None
+)
+
+service_context = ServiceContext.from_defaults(
+  llm=llm,
+  embed_model=embed_model,
+  node_parser=node_parser,
+  prompt_helper=prompt_helper
+)
+```
+
+### Setting global configuration
+You can set a service context as the global default that applies to the entire LlamaIndex pipeline:
+
+```python
+from llama_index import set_global_service_context
+set_global_service_context(service_context)
+```
+
+### Setting local configuration
+You can pass in a service context to specific part of the pipeline to override the default configuration: 
+
+```python
+query_engine = index.as_query_engine(service_context=service_context)
+response = query_engine.query("What did the author do growing up?")
+print(response)
+```
\ No newline at end of file
diff --git a/docs/use_cases/agents.md b/docs/end_to_end_tutorials/agents.md
similarity index 91%
rename from docs/use_cases/agents.md
rename to docs/end_to_end_tutorials/agents.md
index 3e8e93227e..728ccebf2f 100644
--- a/docs/use_cases/agents.md
+++ b/docs/end_to_end_tutorials/agents.md
@@ -27,7 +27,7 @@ a more information + a detailed analysis.
 
 ### "Agent-like" Components within LlamaIndex 
 
-LlamaIndex provides core modules capable of automated reasoning for different use cases over your data. Please check out our [use cases doc](/use_cases/queries.md) for more details on high-level use cases that LlamaIndex can help fulfill.
+LlamaIndex provides core modules capable of automated reasoning for different use cases over your data. Please check out our [use cases doc](/end_to_end_tutorials/use_cases.md) for more details on high-level use cases that LlamaIndex can help fulfill.
 
 Some of these core modules are shown below along with example tutorials (not comprehensive, please click into the guides/how-tos for more details).
 
@@ -39,7 +39,7 @@ Some of these core modules are shown below along with example tutorials (not com
 
 
 **Query Transformations**
-- [How-To](/how_to/query_engine/advanced/query_transformations.md)
+- [How-To](/core_modules/query_modules/query_engine/advanced/query_transformations.md)
 - [Multi-Step Query Decomposition](/examples/query_transformations/HyDEQueryTransformDemo.ipynb) ([Notebook](https://github.com/jerryjliu/llama_index/blob/main/docs/examples/query_transformations/HyDEQueryTransformDemo.ipynb))
 
 **Routing**
@@ -47,11 +47,11 @@ Some of these core modules are shown below along with example tutorials (not com
 - [Router Query Engine Guide](/examples/query_engine/RouterQueryEngine.ipynb) ([Notebook](https://github.com/jerryjliu/llama_index/blob/main/docs/examples/query_engine/RouterQueryEngine.ipynb))
 
 **LLM Reranking**
-- [Second Stage Processing How-To](/how_to/query_engine/advanced/second_stage.md)
+- [Second Stage Processing How-To](/core_modules/query_modules/node_postprocessors/root.md)
 - [LLM Reranking Guide (Great Gatsby)](/examples/node_postprocessor/LLMReranker-Gatsby.ipynb)
 
 **Chat Engines**
-- [Chat Engines How-To](../how_to/chat_engine/root.md)
+- [Chat Engines How-To](/core_modules/query_modules/chat_engines/root.md)
 
 
 ### Using LlamaIndex as as Tool within an Agent Framework
@@ -64,7 +64,7 @@ We have deep integrations with LangChain.
 LlamaIndex query engines can be easily packaged as Tools to be used within a LangChain agent, and LlamaIndex can also be used as a memory module / retriever. Check out our guides/tutorials below!
 
 **Resources**
-- [LangChain integration guide](/how_to/integrations/using_with_langchain.md)
+- [LangChain integration guide](/community/integrations/using_with_langchain.md)
 - [Building a Chatbot Tutorial (LangChain + LlamaIndex)](/guides/tutorials/building_a_chatbot.md)
 - [OnDemandLoaderTool Tutorial](/examples/tools/OnDemandLoaderTool.ipynb)
 
diff --git a/docs/end_to_end_tutorials/apps.md b/docs/end_to_end_tutorials/apps.md
new file mode 100644
index 0000000000..61b1a0d2bc
--- /dev/null
+++ b/docs/end_to_end_tutorials/apps.md
@@ -0,0 +1,13 @@
+
+# Full-Stack Web Application
+
+LlamaIndex can be integrated into a downstream full-stack web application. It can be used in a backend server (such as Flask), packaged into a Docker container, and/or directly used in a framework such as Streamlit.
+
+We provide tutorials and resources to help you get started in this area.
+
+Relevant Resources:
+- [Fullstack Application Guide](/end_to_end_tutorials/apps/fullstack_app_guide.md)
+- [Fullstack Application with Delphic](/end_to_end_tutorials/apps/fullstack_with_delphic.md)
+- [A Guide to Extracting Terms and Definitions](/end_to_end_tutorials/question_and_answer/terms_definitions_tutorial.md)
+- [LlamaIndex Starter Pack](https://github.com/logan-markewich/llama_index_starter_pack)
+
diff --git a/docs/guides/tutorials/fullstack_app_guide.md b/docs/end_to_end_tutorials/apps/fullstack_app_guide.md
similarity index 100%
rename from docs/guides/tutorials/fullstack_app_guide.md
rename to docs/end_to_end_tutorials/apps/fullstack_app_guide.md
diff --git a/docs/guides/tutorials/fullstack_with_delphic.md b/docs/end_to_end_tutorials/apps/fullstack_with_delphic.md
similarity index 100%
rename from docs/guides/tutorials/fullstack_with_delphic.md
rename to docs/end_to_end_tutorials/apps/fullstack_with_delphic.md
diff --git a/docs/end_to_end_tutorials/chatbots.md b/docs/end_to_end_tutorials/chatbots.md
new file mode 100644
index 0000000000..3756835c7d
--- /dev/null
+++ b/docs/end_to_end_tutorials/chatbots.md
@@ -0,0 +1,7 @@
+# Chatbots
+
+Chatbots are an incredibly popular use case for LLM's. LlamaIndex gives you the tools to build Knowledge-augmented chatbots and agents.
+
+Relevant Resources:
+- [Building a Chatbot](/end_to_end_tutorials/chatbots/building_a_chatbot.md)
+- [Using with a LangChain Agent](/community/integrations/using_with_langchain.md)
\ No newline at end of file
diff --git a/docs/guides/tutorials/building_a_chatbot.md b/docs/end_to_end_tutorials/chatbots/building_a_chatbot.md
similarity index 100%
rename from docs/guides/tutorials/building_a_chatbot.md
rename to docs/end_to_end_tutorials/chatbots/building_a_chatbot.md
diff --git a/docs/guides/tutorials/discover_llamaindex.md b/docs/end_to_end_tutorials/discover_llamaindex.md
similarity index 93%
rename from docs/guides/tutorials/discover_llamaindex.md
rename to docs/end_to_end_tutorials/discover_llamaindex.md
index 2d404acb0f..b6249f3ec1 100644
--- a/docs/guides/tutorials/discover_llamaindex.md
+++ b/docs/end_to_end_tutorials/discover_llamaindex.md
@@ -18,7 +18,7 @@ This video covers managing documents from a source that is consantly updating (i
 
 [Notebook + Supplimentary Material](https://github.com/jerryjliu/llama_index/tree/main/docs/examples/discover_llamaindex/document_management/)
 
-[Reference Docs](../../how_to/index/document_management.md)
+[Reference Docs](../../core_modules/data_modules/index/document_management.md)
 
 ## Joint Text to SQL and Semantic Search
 
diff --git a/docs/end_to_end_tutorials/privacy.md b/docs/end_to_end_tutorials/privacy.md
new file mode 100644
index 0000000000..20745498e5
--- /dev/null
+++ b/docs/end_to_end_tutorials/privacy.md
@@ -0,0 +1,5 @@
+# Private Setup
+
+Relevant Resources:
+- [Using LlamaIndex with Local Models](https://colab.research.google.com/drive/16QMQePkONNlDpgiltOi7oRQgmB8dU5fl?usp=sharing)
+
diff --git a/docs/use_cases/queries.md b/docs/end_to_end_tutorials/question_and_answer.md
similarity index 83%
rename from docs/use_cases/queries.md
rename to docs/end_to_end_tutorials/question_and_answer.md
index 0d3b1cacfc..0d4e97670e 100644
--- a/docs/use_cases/queries.md
+++ b/docs/end_to_end_tutorials/question_and_answer.md
@@ -1,4 +1,4 @@
-# Queries over your Data
+# Q&A over Documents
 
 At a high-level, LlamaIndex gives you the ability to query your data for any downstream LLM use case,
 whether it's question-answering, summarization, or a component in a chatbot.
@@ -10,7 +10,7 @@ of simplest (top-k semantic search), to more advanced capabilities.
 
 The most basic example usage of LlamaIndex is through semantic search. We provide
 a simple in-memory vector store for you to get started, but you can also choose
-to use any one of our [vector store integrations](/how_to/integrations/vector_stores.md):
+to use any one of our [vector store integrations](/community/integrations/vector_stores.md):
 
 ```python
 from llama_index import VectorStoreIndex, SimpleDirectoryReader
@@ -23,7 +23,8 @@ print(response)
 ```
 
 **Tutorials**
-- [Quickstart](/getting_started/starter_example.md)
+- [Starter Tutorial](/getting_started/starter_example.md)
+- [Basic Usage Pattern](/end_to_end_tutorials/usage_pattern.md)
 
 **Guides**
 - [Example](../examples/vector_stores/SimpleIndexDemo.ipynb) ([Notebook](https://github.com/jerryjliu/llama_index/tree/main/docs/examples/vector_stores/SimpleIndexDemo.ipynb))
@@ -84,8 +85,8 @@ response = query_engine.query("<query_str>")
 ```
 
 **Guides**
-- [Composability](../how_to/index/composability.md)
-- [City Analysis](../examples/composable_indices/city_analysis/PineconeDemo-CityAnalysis.ipynb) ([Notebook](https://github.com/jerryjliu/llama_index/blob/main/docs/examples/composable_indices/city_analysis/PineconeDemo-CityAnalysis.ipynb))
+- [Composability](/core_modules/data_modules/index/composability.md)
+- [City Analysis](/examples/composable_indices/city_analysis/PineconeDemo-CityAnalysis.ipynb) ([Notebook](https://github.com/jerryjliu/llama_index/blob/main/docs/examples/composable_indices/city_analysis/PineconeDemo-CityAnalysis.ipynb))
 
 
 
@@ -152,8 +153,8 @@ decompose_transform = DecomposeQueryTransform(
 This module will help break down a complex query into a simpler one over your existing index structure.
 
 **Guides**
-- [Query Transformations](../how_to/query_engine/advanced/query_transformations.md)
-- [City Analysis Compare/Contrast Example](../examples//composable_indices/city_analysis/City_Analysis-Decompose.ipynb) ([Notebook](https://github.com/jerryjliu/llama_index/blob/main/docs/examples/composable_indices/city_analysis/City_Analysis-Decompose.ipynb))
+- [Query Transformations](/core_modules/query_modules/query_engine/advanced/query_transformations.md)
+- [City Analysis Compare/Contrast Example](/examples/composable_indices/city_analysis/City_Analysis-Decompose.ipynb) ([Notebook](https://github.com/jerryjliu/llama_index/blob/main/docs/examples/composable_indices/city_analysis/City_Analysis-Decompose.ipynb))
 
 You can also rely on the LLM to *infer* whether to perform compare/contrast queries (see Multi-Document Queries below).
 
@@ -212,7 +213,7 @@ the module will first decompose the query into a simpler initial question "What
 query the index, and then ask followup questions.
 
 **Guides**
-- [Query Transformations](../how_to/query_engine/advanced/query_transformations.md)
+- [Query Transformations](/core_modules/query_modules/query_engine/advanced/query_transformations.md)
 - [Multi-Step Query Decomposition](../examples/query_transformations/HyDEQueryTransformDemo.ipynb) ([Notebook](https://github.com/jerryjliu/llama_index/blob/main/docs/examples/query_transformations/HyDEQueryTransformDemo.ipynb))
 
 
@@ -223,7 +224,11 @@ LlamaIndex can support queries that require an understanding of time. It can do
 - Sort by recency and filter outdated context.
 
 **Guides**
-- [Second-Stage Postprocessing Guide](../how_to/query_engine/advanced/second_stage.md)
-- [Prev/Next Postprocessing](../examples/node_postprocessor/PrevNextPostprocessorDemo.ipynb)
-- [Recency Postprocessing](../examples/node_postprocessor/RecencyPostprocessorDemo.ipynb)
-
+- [Second-Stage Postprocessing Guide](/core_modules/query_modules/node_postprocessors/root.md)
+- [Prev/Next Postprocessing](/examples/node_postprocessor/PrevNextPostprocessorDemo.ipynb)
+- [Recency Postprocessing](/examples/node_postprocessor/RecencyPostprocessorDemo.ipynb)
+
+### Additional Resources
+- [A Guide to Creating a Unified Query Framework over your Indexes](/end_to_end_tutorials/question_and_answer/unified_query.md)
+- [A Guide to Extracting Terms and Definitions](/end_to_end_tutorials/question_and_answer/terms_definitions_tutorial.md)
+- [SEC 10k Analysis](https://medium.com/@jerryjliu98/how-unstructured-and-llamaindex-can-help-bring-the-power-of-llms-to-your-own-data-3657d063e30d)
\ No newline at end of file
diff --git a/docs/guides/tutorials/terms_definitions_tutorial.md b/docs/end_to_end_tutorials/question_and_answer/terms_definitions_tutorial.md
similarity index 97%
rename from docs/guides/tutorials/terms_definitions_tutorial.md
rename to docs/end_to_end_tutorials/question_and_answer/terms_definitions_tutorial.md
index d3b3635be7..29e5c032cc 100644
--- a/docs/guides/tutorials/terms_definitions_tutorial.md
+++ b/docs/end_to_end_tutorials/question_and_answer/terms_definitions_tutorial.md
@@ -1,6 +1,6 @@
 # A Guide to Extracting Terms and Definitions
 
-Llama Index has many use cases (semantic search, summarization, etc.) that are [well documented](https://gpt-index.readthedocs.io/en/latest/use_cases/queries.html). However, this doesn't mean we can't apply Llama Index to very specific use cases!
+Llama Index has many use cases (semantic search, summarization, etc.) that are [well documented](/end_to_end_tutorials/use_cases.md). However, this doesn't mean we can't apply Llama Index to very specific use cases!
 
 In this tutorial, we will go through the design process of using Llama Index to extract terms and definitions from text, while allowing users to query those terms later. Using [Streamlit](https://streamlit.io/), we can provide an easy way to build frontend for running and testing all of this, and quickly iterate with our design.
 
diff --git a/docs/guides/tutorials/unified_query.md b/docs/end_to_end_tutorials/question_and_answer/unified_query.md
similarity index 99%
rename from docs/guides/tutorials/unified_query.md
rename to docs/end_to_end_tutorials/question_and_answer/unified_query.md
index 14260412c0..1fe59a4fe1 100644
--- a/docs/guides/tutorials/unified_query.md
+++ b/docs/end_to_end_tutorials/question_and_answer/unified_query.md
@@ -1,6 +1,6 @@
 # A Guide to Creating a Unified Query Framework over your Indexes
 
-LlamaIndex offers a variety of different [query use cases](/use_cases/queries.md).
+LlamaIndex offers a variety of different [use cases](/end_to_end_tutorials/use_cases.md).
 
 For simple queries, we may want to use a single index data structure, such as a `VectorStoreIndex` for semantic search, or `ListIndex` for summarization.
 
diff --git a/docs/end_to_end_tutorials/structured_data.md b/docs/end_to_end_tutorials/structured_data.md
new file mode 100644
index 0000000000..eceb8a6cb9
--- /dev/null
+++ b/docs/end_to_end_tutorials/structured_data.md
@@ -0,0 +1,5 @@
+# Structured Data
+
+Relevant Resources:
+- [A Guide to LlamaIndex + Structured Data](/end_to_end_tutorials/structured_data/sql_guide.md)
+- [Airbyte SQL Index Guide](/end_to_end_tutorials/structured_data/Airbyte_demo.ipynb)
\ No newline at end of file
diff --git a/docs/guides/tutorials/Airbyte_demo.ipynb b/docs/end_to_end_tutorials/structured_data/Airbyte_demo.ipynb
similarity index 100%
rename from docs/guides/tutorials/Airbyte_demo.ipynb
rename to docs/end_to_end_tutorials/structured_data/Airbyte_demo.ipynb
diff --git a/docs/guides/tutorials/img/airbyte_1.png b/docs/end_to_end_tutorials/structured_data/img/airbyte_1.png
similarity index 100%
rename from docs/guides/tutorials/img/airbyte_1.png
rename to docs/end_to_end_tutorials/structured_data/img/airbyte_1.png
diff --git a/docs/guides/tutorials/img/airbyte_3.png b/docs/end_to_end_tutorials/structured_data/img/airbyte_3.png
similarity index 100%
rename from docs/guides/tutorials/img/airbyte_3.png
rename to docs/end_to_end_tutorials/structured_data/img/airbyte_3.png
diff --git a/docs/guides/tutorials/img/airbyte_6.png b/docs/end_to_end_tutorials/structured_data/img/airbyte_6.png
similarity index 100%
rename from docs/guides/tutorials/img/airbyte_6.png
rename to docs/end_to_end_tutorials/structured_data/img/airbyte_6.png
diff --git a/docs/guides/tutorials/img/airbyte_7.png b/docs/end_to_end_tutorials/structured_data/img/airbyte_7.png
similarity index 100%
rename from docs/guides/tutorials/img/airbyte_7.png
rename to docs/end_to_end_tutorials/structured_data/img/airbyte_7.png
diff --git a/docs/guides/tutorials/img/airbyte_8.png b/docs/end_to_end_tutorials/structured_data/img/airbyte_8.png
similarity index 100%
rename from docs/guides/tutorials/img/airbyte_8.png
rename to docs/end_to_end_tutorials/structured_data/img/airbyte_8.png
diff --git a/docs/guides/tutorials/img/airbyte_9.png b/docs/end_to_end_tutorials/structured_data/img/airbyte_9.png
similarity index 100%
rename from docs/guides/tutorials/img/airbyte_9.png
rename to docs/end_to_end_tutorials/structured_data/img/airbyte_9.png
diff --git a/docs/guides/tutorials/img/github_1.png b/docs/end_to_end_tutorials/structured_data/img/github_1.png
similarity index 100%
rename from docs/guides/tutorials/img/github_1.png
rename to docs/end_to_end_tutorials/structured_data/img/github_1.png
diff --git a/docs/guides/tutorials/img/github_2.png b/docs/end_to_end_tutorials/structured_data/img/github_2.png
similarity index 100%
rename from docs/guides/tutorials/img/github_2.png
rename to docs/end_to_end_tutorials/structured_data/img/github_2.png
diff --git a/docs/guides/tutorials/img/github_3.png b/docs/end_to_end_tutorials/structured_data/img/github_3.png
similarity index 100%
rename from docs/guides/tutorials/img/github_3.png
rename to docs/end_to_end_tutorials/structured_data/img/github_3.png
diff --git a/docs/guides/tutorials/img/snowflake_1.png b/docs/end_to_end_tutorials/structured_data/img/snowflake_1.png
similarity index 100%
rename from docs/guides/tutorials/img/snowflake_1.png
rename to docs/end_to_end_tutorials/structured_data/img/snowflake_1.png
diff --git a/docs/guides/tutorials/img/snowflake_2.png b/docs/end_to_end_tutorials/structured_data/img/snowflake_2.png
similarity index 100%
rename from docs/guides/tutorials/img/snowflake_2.png
rename to docs/end_to_end_tutorials/structured_data/img/snowflake_2.png
diff --git a/docs/guides/tutorials/sql_guide.md b/docs/end_to_end_tutorials/structured_data/sql_guide.md
similarity index 100%
rename from docs/guides/tutorials/sql_guide.md
rename to docs/end_to_end_tutorials/structured_data/sql_guide.md
diff --git a/docs/guides/primer/usage_pattern.md b/docs/end_to_end_tutorials/usage_pattern.md
similarity index 91%
rename from docs/guides/primer/usage_pattern.md
rename to docs/end_to_end_tutorials/usage_pattern.md
index a1edc9a64c..74f5bae33d 100644
--- a/docs/guides/primer/usage_pattern.md
+++ b/docs/end_to_end_tutorials/usage_pattern.md
@@ -1,4 +1,4 @@
-# LlamaIndex Usage Pattern
+# Basic Usage Pattern
 
 The general usage pattern of LlamaIndex is as follows:
 
@@ -11,7 +11,7 @@ The general usage pattern of LlamaIndex is as follows:
 ## 1. Load in Documents
 
 The first step is to load in data. This data is represented in the form of `Document` objects.
-We provide a variety of [data loaders](/how_to/examples/data_connectors.md) which will load in Documents
+We provide a variety of [data loaders](/core_modules/data_modules/connector/root.md) which will load in Documents
 through the `load_data` function, e.g.:
 
 ```python
@@ -140,7 +140,7 @@ index = VectorStoreIndex([])
 index.insert_nodes(nodes)
 ```
 
-See the [Document Management How-To](/how_to/index/document_management.md) for more details on managing documents and an example notebook.
+See the [Document Management How-To](/core_modules/data_modules/index/document_management.md) for more details on managing documents and an example notebook.
 
 ### Customizing Documents
 
@@ -156,7 +156,7 @@ document = Document(
 )
 ```
 
-More information and approaches to this are discussed in the section [Customizing Documents](/how_to/customization/custom_documents.md).
+More information and approaches to this are discussed in the section [Customizing Documents](/core_modules/data_modules/documents_and_nodes/usage_documents.md).
 
 ### Customizing LLM's
 
@@ -181,7 +181,7 @@ index = VectorStoreIndex.from_documents(
 )
 ```
 
-See the [Custom LLM's How-To](/how_to/customization/custom_llms.md) for more details.
+See the [Custom LLM's How-To](/core_modules/model_modules/llms/usage_custom.md) for more details.
 
 ### Global ServiceContext
 
@@ -194,26 +194,26 @@ set_global_service_context(service_context)
 
 This service context will always be used as the default if not specified as a keyword argument in LlamaIndex functions.
 
-For more details on the service context, including how to create a global service context, see the page [Customizing the ServiceContext](/how_to/customization/service_context.md).
+For more details on the service context, including how to create a global service context, see the page [Customizing the ServiceContext](/core_modules/supporting_modules/service_context.md).
 
 ### Customizing Prompts
 
 Depending on the index used, we used default prompt templates for constructing the index (and also insertion/querying).
-See [Custom Prompts How-To](/how_to/customization/custom_prompts.md) for more details on how to customize your prompt.
+See [Custom Prompts How-To](/core_modules/model_modules/prompts.md) for more details on how to customize your prompt.
 
 ### Customizing embeddings
 
 For embedding-based indices, you can choose to pass in a custom embedding model. See
-[Custom Embeddings How-To](custom-embeddings) for more details.
+[Custom Embeddings How-To](/core_modules/model_modules/embeddings/usage_pattern.md) for more details.
 
-### Cost Predictor
+### Cost Analysis
 
 Creating an index, inserting to an index, and querying an index may use tokens. We can track
 token usage through the outputs of these operations. When running operations,
 the token usage will be printed.
 
 You can also fetch the token usage through `index.llm_predictor.last_token_usage`.
-See [Cost Predictor How-To](/how_to/analysis/cost_analysis.md) for more details.
+See [Cost Analysis How-To](/core_modules/supporting_modules/cost_analysis/usage_pattern.md) for more details.
 
 ### [Optional] Save the index for future use
 
@@ -264,7 +264,7 @@ index = load_index_from_storage(
 
 You can build indices on top of other indices!
 Composability gives you greater power in indexing your heterogeneous sources of data. For a discussion on relevant use cases,
-see our [Query Use Cases](/use_cases/queries.md). For technical details and examples, see our [Composability How-To](/how_to/index/composability.md).
+see our [Query Use Cases](/end_to_end_tutorials/question_and_answer.md). For technical details and examples, see our [Composability How-To](/core_modules/data_modules/index/composability.md).
 
 ## 5. Query the index.
 
@@ -327,7 +327,7 @@ print(response)
 
 You may also add your own retrieval, response synthesis, and overall query logic, by implementing the corresponding interfaces.
 
-For a full list of implemented components and the supported configurations, please see the detailed [reference docs](/reference/query.rst).
+For a full list of implemented components and the supported configurations, please see the detailed [reference docs](/api_reference/query.rst).
 
 In the following, we discuss some commonly used configurations in detail.
 
@@ -353,7 +353,7 @@ query_engine = RetrieverQueryEngine(retriever)
 response = query_engine.query("What did the author do growing up?")
 ```
 
-The full list of retrievers for each index (and their shorthand) is documented in the [Query Reference](/reference/query.rst).
+The full list of retrievers for each index (and their shorthand) is documented in the [Query Reference](/api_reference/query.rst).
 
 (setting-response-mode)=
 
@@ -416,7 +416,7 @@ For example:
 - `SimilarityPostprocessor`: filters nodes by setting a threshold on the similarity score (thus only supported by embedding-based retrievers)
 - `PrevNextNodePostprocessor`: augments retrieved `Node` objects with additional relevant context based on `Node` relationships.
 
-The full list of node postprocessors is documented in the [Node Postprocessor Reference](/reference/node_postprocessor.rst).
+The full list of node postprocessors is documented in the [Node Postprocessor Reference](/api_reference/node_postprocessor.rst).
 
 To configure the desired node postprocessors:
 
@@ -435,7 +435,7 @@ response = query_engine.query("What did the author do growing up?")
 
 ## 5. Parsing the response
 
-The object returned is a [`Response` object](/reference/response.rst).
+The object returned is a [`Response` object](/api_reference/response.rst).
 The object contains both the response text as well as the "sources" of the response:
 
 ```python
diff --git a/docs/end_to_end_tutorials/use_cases.md b/docs/end_to_end_tutorials/use_cases.md
new file mode 100644
index 0000000000..6f4ca2add3
--- /dev/null
+++ b/docs/end_to_end_tutorials/use_cases.md
@@ -0,0 +1,17 @@
+# Use Cases
+
+```{toctree}
+---
+maxdepth: 1
+---
+/end_to_end_tutorials/question_and_answer.md
+/end_to_end_tutorials/chatbots.md
+/end_to_end_tutorials/agents.md
+/end_to_end_tutorials/structured_data.md
+/end_to_end_tutorials/apps.md
+/end_to_end_tutorials/privacy.md
+```
+
+
+
+
diff --git a/docs/examples/analysis/PlaygroundDemo.ipynb b/docs/examples/analysis/PlaygroundDemo.ipynb
index 8e384160a6..45aa6bbac0 100644
--- a/docs/examples/analysis/PlaygroundDemo.ipynb
+++ b/docs/examples/analysis/PlaygroundDemo.ipynb
@@ -18,18 +18,20 @@
    "source": [
     "# My OpenAI Key\n",
     "import os\n",
+    "import openai\n",
     "\n",
-    "os.environ[\"OPENAI_API_KEY\"] = \"INSERT OPENAI KEY\""
+    "os.environ[\"OPENAI_API_KEY\"] = \"sk-....\"\n",
+    "openai.api_key = os.environ[\"OPENAI_API_KEY\"]"
    ]
   },
   {
    "cell_type": "code",
    "execution_count": 2,
-   "id": "d726e871",
+   "id": "d572aa9a",
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Hide INFO logs regarding token usage, etc\n",
+    "# Hide logs\n",
     "import logging\n",
     "\n",
     "logger = logging.getLogger()\n",
@@ -37,6 +39,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "40cf0773",
    "metadata": {},
@@ -48,7 +51,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 3,
    "id": "fa34cd83",
    "metadata": {},
    "outputs": [],
@@ -64,6 +67,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "0c32392b",
    "metadata": {},
@@ -73,20 +77,10 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 4,
    "id": "f59e6c18",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "INFO:root:> [build_index_from_documents] Total LLM token usage: 0 tokens\n",
-      "INFO:root:> [build_index_from_documents] Total embedding token usage: 18344 tokens\n",
-      "INFO:root:> Building index from nodes: 5 chunks\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "indices = [\n",
     "    VectorStoreIndex.from_documents(documents),\n",
@@ -95,6 +89,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "827ada33",
    "metadata": {},
@@ -107,7 +102,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": 5,
    "id": "a04e4535",
    "metadata": {},
    "outputs": [],
@@ -119,17 +114,10 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": 6,
    "id": "5f6999fc",
    "metadata": {},
    "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "INFO:openai:error_code=None error_message='Rate limit reached for default-global-with-image-limits in organization org-ehTdCqs0FpsxuTTwsJIlNSdZ on requests per min. Limit: 60.000000 / min. Current: 110.000000 / min. Contact support@openai.com if you continue to have issues. Please add a payment method to your account to increase your rate limit. Visit https://platform.openai.com/account/billing to add a payment method.' error_param=None error_type=requests message='OpenAI API error received' stream_error=False\n"
-     ]
-    },
     {
      "name": "stdout",
      "output_type": "stream",
@@ -137,129 +125,42 @@
       "\u001b[1mQuery:\u001b[0m\n",
       "What is the population of Berlin?\n",
       "\n",
-      "Trying 10 combinations...\n",
-      "\n",
-      "\n",
-      "\u001b[1mGPTVectorStoreIndex\u001b[0m, mode = default\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "INFO:openai:error_code=None error_message='Rate limit reached for default-global-with-image-limits in organization org-ehTdCqs0FpsxuTTwsJIlNSdZ on requests per min. Limit: 60.000000 / min. Current: 90.000000 / min. Contact support@openai.com if you continue to have issues. Please add a payment method to your account to increase your rate limit. Visit https://platform.openai.com/account/billing to add a payment method.' error_param=None error_type=requests message='OpenAI API error received' stream_error=False\n",
-      "INFO:openai:error_code=None error_message='Rate limit reached for default-global-with-image-limits in organization org-ehTdCqs0FpsxuTTwsJIlNSdZ on requests per min. Limit: 60.000000 / min. Current: 90.000000 / min. Contact support@openai.com if you continue to have issues. Please add a payment method to your account to increase your rate limit. Visit https://platform.openai.com/account/billing to add a payment method.' error_param=None error_type=requests message='OpenAI API error received' stream_error=False\n",
-      "INFO:openai:error_code=None error_message='Rate limit reached for default-global-with-image-limits in organization org-ehTdCqs0FpsxuTTwsJIlNSdZ on requests per min. Limit: 60.000000 / min. Current: 80.000000 / min. Contact support@openai.com if you continue to have issues. Please add a payment method to your account to increase your rate limit. Visit https://platform.openai.com/account/billing to add a payment method.' error_param=None error_type=requests message='OpenAI API error received' stream_error=False\n",
-      "INFO:root:> [query] Total LLM token usage: 3545 tokens\n",
-      "INFO:root:> [query] Total embedding token usage: 7 tokens\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
+      "\u001b[1mVectorStoreIndex\u001b[0m, retriever mode = default\n",
       "\u001b[36;1m\u001b[1;3m\n",
-      "The population of Berlin in 1949 was approximately 2.2 million inhabitants. After the fall of the Berlin Wall in 1989, the population of Berlin increased to approximately 3.7 million inhabitants.\u001b[0m\n",
+      "The population of Berlin is approximately 3.7 million inhabitants.\u001b[0m\n",
       "\n",
-      "\u001b[1mGPTVectorStoreIndex\u001b[0m, mode = embedding\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "INFO:root:> [query] Total LLM token usage: 3545 tokens\n",
-      "INFO:root:> [query] Total embedding token usage: 7 tokens\n",
-      "INFO:root:> Starting query: What is the population of Berlin?\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[36;1m\u001b[1;3m\n",
-      "The population of Berlin in 1949 was approximately 2.2 million inhabitants. After the fall of the Berlin Wall in 1989, the population of Berlin increased to approximately 3.7 million inhabitants.\u001b[0m\n",
+      "\u001b[1mTreeIndex\u001b[0m, retriever mode = select_leaf\n",
+      "\u001b[33;1m\u001b[1;3m\n",
+      "It is not possible to answer this question with the given context information.\u001b[0m\n",
       "\n",
-      "\u001b[1mGPTTreeIndex\u001b[0m, mode = default\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "INFO:root:>[Level 0] Selected node: [1]/[1]\n",
-      "INFO:root:>[Level 1] Selected node: [3]/[3]\n",
-      "INFO:root:> [query] Total LLM token usage: 5168 tokens\n",
-      "INFO:root:> [query] Total embedding token usage: 0 tokens\n",
-      "INFO:root:> Starting query: What is the population of Berlin?\n",
-      "INFO:root:> Building index from nodes: 6 chunks\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[33;1m\u001b[1;3mThe population of Berlin is approximately 3.7 million people.\u001b[0m\n",
+      "\u001b[1mTreeIndex\u001b[0m, retriever mode = select_leaf_embedding\n",
+      "\u001b[33;1m\u001b[1;3m\n",
+      "The population of Berlin is approximately 3.7 million inhabitants.\u001b[0m\n",
       "\n",
-      "\u001b[1mGPTTreeIndex\u001b[0m, mode = summarize\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "INFO:root:> [query] Total LLM token usage: 21617 tokens\n",
-      "INFO:root:> [query] Total embedding token usage: 0 tokens\n",
-      "INFO:root:> Starting query: What is the population of Berlin?\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
+      "\u001b[1mTreeIndex\u001b[0m, retriever mode = all_leaf\n",
       "\u001b[33;1m\u001b[1;3m\n",
-      "The population of Berlin is approximately 3.7 million people.\u001b[0m\n",
       "\n",
-      "\u001b[1mGPTTreeIndex\u001b[0m, mode = embedding\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "INFO:root:> [query] Total LLM token usage: 368 tokens\n",
-      "INFO:root:> [query] Total embedding token usage: 4598 tokens\n",
-      "INFO:root:> Starting query: What is the population of Berlin?\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "\u001b[33;1m\u001b[1;3mApproximately 3.7 million people.\u001b[0m\n",
+      "The population of Berlin is approximately 3.75 million inhabitants. This population has been shaped by the city's turbulent history, with Jewish emigration during the 1930s, the destruction of the city during World War II, and the division of the city into East and West Berlin during the Cold War. Since the reunification of Germany in 1990, Berlin has seen a surge in population growth, with many people from other parts of Germany and the world moving to the city. At the end of 2019, the population of Berlin was estimated to be around 3.75 million inhabitants. The city is home to a diverse religious population, with the faithful of the different religions and denominations maintaining many places of worship in Berlin, including eight parishes of the Independent Evangelical Lutheran Church, 36 Baptist congregations, 29 New Apostolic Churches, 15 United Methodist churches, eight Free Evangelical Congregations, four Churches of Christ, Scientist (1st, 2nd, 3rd, and 11th), six congregations of the Church of Jesus Christ of Latter-day Saints, an Old Catholic church, an Anglican church, more than 80 mosques, ten synagogues, and two Buddhist temples. Berlin is also home to a large number of immigrants from around the world, with 48 percent of the residents under the age of 15 having a migration background in 2017. Berlin is a major economic center in Europe, with many international companies and organizations based in the city, such as the Fraunhofer Society, the Leibniz Association, the Helmholtz Association, and the Max Planck Society, as well as a large number of tourists visiting each year. The city is well-connected to the rest of Germany and Europe through its extensive road, rail, and air transport networks, making it an attractive destination for business and leisure travelers alike. 
It is also home to a number of renowned research institutions, universities, and medical schools, as well as seven symphony orchestras, including the world-renowned Berlin Philharmonic Orchestra, the Konzerthausorchester Berlin, and the Haus der Kulturen der Welt. Berlin is home to a vibrant cultural and entertainment scene, with a diverse range of cuisine, including Michelin-starred restaurants, vegetarian and vegan offerings, street food, and international cuisine, as well as a variety of botanical gardens, zoos, and other recreational activities. This makes it an attractive destination for people from all over the world. Berlin is also home to two zoos, the Botanischer Garten, the Tiergarten park, and the Gärten der Welt, as well as many cafés, street musicians, beach bars, flea markets, and boutique shops. Berlin has established a high-profile as a host city of major international sporting events, such as the 1936 Summer Olympics, the 2006 FIFA World Cup final, the IAAF World Championships in Athletics, the Basketball Euroleague Final Four, the UEFA Champions League Final, and the 2023 Special Olympics World Summer Games. It is also home to several professional sports teams, such as Hertha BSC, and has a large Olympic training center.\u001b[0m\n",
       "\n",
-      "\u001b[1mGPTTreeIndex\u001b[0m, mode = retrieve\n"
-     ]
-    },
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "INFO:root:> [query] Total LLM token usage: 1439 tokens\n",
-      "INFO:root:> [query] Total embedding token usage: 0 tokens\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
+      "\u001b[1mTreeIndex\u001b[0m, retriever mode = root\n",
       "\u001b[33;1m\u001b[1;3m\n",
-      "The population of Berlin is 3.75 million registered inhabitants.\u001b[0m\n",
+      "The population of Berlin is 3.7 million within city limits and 4.5 million in its urban area.\u001b[0m\n",
       "\n",
       "\n",
-      "Ran 6 combinations in total.\n"
+      "Ran 5 combinations in total.\n"
      ]
-    },
+    }
+   ],
+   "source": [
+    "result_df = playground.compare(\"What is the population of Berlin?\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "id": "c0ad183e",
+   "metadata": {},
+   "outputs": [
     {
      "data": {
       "text/html": [
@@ -282,11 +183,12 @@
        "    <tr style=\"text-align: right;\">\n",
        "      <th></th>\n",
        "      <th>Index</th>\n",
-       "      <th>Mode</th>\n",
+       "      <th>Retriever Mode</th>\n",
        "      <th>Output</th>\n",
        "      <th>Duration</th>\n",
-       "      <th>LLM Tokens</th>\n",
-       "      <th>Embedding Tokens</th>\n",
+       "      <th>Prompt Tokens</th>\n",
+       "      <th>Completion Tokens</th>\n",
+       "      <th>Embed Tokens</th>\n",
        "    </tr>\n",
        "  </thead>\n",
        "  <tbody>\n",
@@ -294,54 +196,50 @@
        "      <th>0</th>\n",
        "      <td>VectorStoreIndex</td>\n",
        "      <td>default</td>\n",
-       "      <td>\\nThe population of Berlin in 1949 was approxi...</td>\n",
-       "      <td>52.319133</td>\n",
-       "      <td>3545</td>\n",
+       "      <td>\\nThe population of Berlin is approximately 3....</td>\n",
+       "      <td>2.525580</td>\n",
+       "      <td>1786</td>\n",
+       "      <td>13</td>\n",
        "      <td>7</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>1</th>\n",
-       "      <td>VectorStoreIndex</td>\n",
-       "      <td>embedding</td>\n",
-       "      <td>\\nThe population of Berlin in 1949 was approxi...</td>\n",
-       "      <td>8.192025</td>\n",
-       "      <td>3545</td>\n",
-       "      <td>7</td>\n",
+       "      <td>TreeIndex</td>\n",
+       "      <td>select_leaf</td>\n",
+       "      <td>\\nIt is not possible to answer this question w...</td>\n",
+       "      <td>5.536037</td>\n",
+       "      <td>4732</td>\n",
+       "      <td>115</td>\n",
+       "      <td>0</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>2</th>\n",
        "      <td>TreeIndex</td>\n",
-       "      <td>default</td>\n",
-       "      <td>The population of Berlin is approximately 3.7 ...</td>\n",
-       "      <td>12.542335</td>\n",
-       "      <td>5168</td>\n",
-       "      <td>0</td>\n",
+       "      <td>select_leaf_embedding</td>\n",
+       "      <td>\\nThe population of Berlin is approximately 3....</td>\n",
+       "      <td>5.426232</td>\n",
+       "      <td>897</td>\n",
+       "      <td>13</td>\n",
+       "      <td>9146</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>3</th>\n",
        "      <td>TreeIndex</td>\n",
-       "      <td>summarize</td>\n",
-       "      <td>\\nThe population of Berlin is approximately 3....</td>\n",
-       "      <td>18.665586</td>\n",
-       "      <td>21617</td>\n",
+       "      <td>all_leaf</td>\n",
+       "      <td>\\n\\nThe population of Berlin is approximately ...</td>\n",
+       "      <td>238.278128</td>\n",
+       "      <td>27291</td>\n",
+       "      <td>5035</td>\n",
        "      <td>0</td>\n",
        "    </tr>\n",
        "    <tr>\n",
        "      <th>4</th>\n",
        "      <td>TreeIndex</td>\n",
-       "      <td>embedding</td>\n",
-       "      <td>Approximately 3.7 million people.</td>\n",
-       "      <td>3.573458</td>\n",
-       "      <td>368</td>\n",
-       "      <td>4598</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>5</th>\n",
-       "      <td>TreeIndex</td>\n",
-       "      <td>retrieve</td>\n",
-       "      <td>\\nThe population of Berlin is 3.75 million reg...</td>\n",
-       "      <td>2.269598</td>\n",
-       "      <td>1439</td>\n",
+       "      <td>root</td>\n",
+       "      <td>\\nThe population of Berlin is 3.7 million with...</td>\n",
+       "      <td>3.375349</td>\n",
+       "      <td>558</td>\n",
+       "      <td>23</td>\n",
        "      <td>0</td>\n",
        "    </tr>\n",
        "  </tbody>\n",
@@ -349,46 +247,46 @@
        "</div>"
       ],
       "text/plain": [
-       "                  Index       Mode  \\\n",
-       "0  VectorStoreIndex    default   \n",
-       "1  VectorStoreIndex  embedding   \n",
-       "2          TreeIndex    default   \n",
-       "3          TreeIndex  summarize   \n",
-       "4          TreeIndex  embedding   \n",
-       "5          TreeIndex   retrieve   \n",
+       "              Index         Retriever Mode  \\\n",
+       "0  VectorStoreIndex                default   \n",
+       "1         TreeIndex            select_leaf   \n",
+       "2         TreeIndex  select_leaf_embedding   \n",
+       "3         TreeIndex               all_leaf   \n",
+       "4         TreeIndex                   root   \n",
        "\n",
-       "                                              Output   Duration  LLM Tokens  \\\n",
-       "0  \\nThe population of Berlin in 1949 was approxi...  52.319133        3545   \n",
-       "1  \\nThe population of Berlin in 1949 was approxi...   8.192025        3545   \n",
-       "2  The population of Berlin is approximately 3.7 ...  12.542335        5168   \n",
-       "3  \\nThe population of Berlin is approximately 3....  18.665586       21617   \n",
-       "4                  Approximately 3.7 million people.   3.573458         368   \n",
-       "5  \\nThe population of Berlin is 3.75 million reg...   2.269598        1439   \n",
+       "                                              Output    Duration  \\\n",
+       "0  \\nThe population of Berlin is approximately 3....    2.525580   \n",
+       "1  \\nIt is not possible to answer this question w...    5.536037   \n",
+       "2  \\nThe population of Berlin is approximately 3....    5.426232   \n",
+       "3  \\n\\nThe population of Berlin is approximately ...  238.278128   \n",
+       "4  \\nThe population of Berlin is 3.7 million with...    3.375349   \n",
        "\n",
-       "   Embedding Tokens  \n",
-       "0                 7  \n",
-       "1                 7  \n",
-       "2                 0  \n",
-       "3                 0  \n",
-       "4              4598  \n",
-       "5                 0  "
+       "   Prompt Tokens  Completion Tokens  Embed Tokens  \n",
+       "0           1786                 13             7  \n",
+       "1           4732                115             0  \n",
+       "2            897                 13          9146  \n",
+       "3          27291               5035             0  \n",
+       "4            558                 23             0  "
       ]
      },
-     "execution_count": 5,
+     "execution_count": 7,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "playground.compare(\"What is the population of Berlin?\")"
+    "result_df"
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "8829a829",
    "metadata": {},
    "source": [
-    "### Initialize with Documents"
+    "### Initialize with Documents\n",
+    "\n",
+    "Automatically construct the playground using a vector, tree, and list index"
    ]
   },
   {
diff --git a/docs/examples/analysis/TokenPredictor.ipynb b/docs/examples/analysis/TokenPredictor.ipynb
deleted file mode 100644
index 5e01b2ca14..0000000000
--- a/docs/examples/analysis/TokenPredictor.ipynb
+++ /dev/null
@@ -1,374 +0,0 @@
-{
- "cells": [
-  {
-   "attachments": {},
-   "cell_type": "markdown",
-   "id": "df19606e-d67e-44d2-bed0-4b804e6fc6c3",
-   "metadata": {},
-   "source": [
-    "# Token Predictors\n",
-    "\n",
-    "Using our token predictors, we can predict the token usage of an operation before actually performing it.\n",
-    "\n",
-    "We first show how to predict LLM token usage with the MockLLMPredictor class, see below.\n",
-    "We then show how to also predict embedding token usage."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "f1a9eb90-335c-4214-8bb6-fd1edbe3ccbd",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# My OpenAI Key\n",
-    "import os\n",
-    "\n",
-    "os.environ[\"OPENAI_API_KEY\"] = \"INSERT OPENAI KEY\""
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "8a707fa6-d79e-4343-92fd-d0fadb25c466",
-   "metadata": {},
-   "source": [
-    "## Using MockLLMPredictor"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "be3f7baa-1c0a-430b-981b-83ddca9e71f2",
-   "metadata": {
-    "tags": []
-   },
-   "source": [
-    "#### Predicting Usage of GPT Tree Index\n",
-    "\n",
-    "Here we predict usage of TreeIndex during index construction and querying, without making any LLM calls.\n",
-    "\n",
-    "NOTE: Predicting query usage before tree is built is only possible with TreeIndex due to the nature of tree traversal. Results will be more accurate if TreeIndex is actually built beforehand."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "id": "c0ef16d1-45ef-43ec-9aad-4e44e9bb8578",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index import (\n",
-    "    TreeIndex,\n",
-    "    MockLLMPredictor,\n",
-    "    SimpleDirectoryReader,\n",
-    "    ServiceContext,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "id": "b2ecdadc-1403-4bd4-a876-f80e4da911ef",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "documents = SimpleDirectoryReader(\"../paul_graham_essay/data\").load_data()"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 8,
-   "id": "11056808-fd7f-4bc6-9348-0605fb4ee668",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "llm_predictor = MockLLMPredictor(max_tokens=256)\n",
-    "service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "9ea4ba66-9a09-4478-b0a8-dee8645fa4e3",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "index = TreeIndex.from_documents(documents, service_context=service_context)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 10,
-   "id": "345433c2-5553-4645-a513-0186b771a21f",
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "19495\n"
-     ]
-    }
-   ],
-   "source": [
-    "print(llm_predictor.last_token_usage)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "f43733ae-af35-46e6-99d9-8ba507acbb0d",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# default query\n",
-    "query_engine = index.as_query_engine(service_context=service_context)\n",
-    "response = query_engine.query(\"What did the author do growing up?\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 13,
-   "id": "4ba19751-da2d-46af-9f8f-4f42871e65a0",
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "5493\n"
-     ]
-    }
-   ],
-   "source": [
-    "print(llm_predictor.last_token_usage)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "4324d85b-ae80-48ab-baf0-7dc160dfae46",
-   "metadata": {},
-   "source": [
-    "#### Predicting Usage of GPT Keyword Table Index Query\n",
-    "\n",
-    "Here we build a real keyword table index over the data, but then predict query usage."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 15,
-   "id": "10447805-38db-41b9-a2c6-b0c95437b276",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index import KeywordTableIndex, MockLLMPredictor, SimpleDirectoryReader"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 16,
-   "id": "8ca76e72-5f43-47c1-a9a4-c5c5db4f0f21",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "documents = SimpleDirectoryReader(\"../paul_graham_essay/data\").load_data()\n",
-    "index = KeywordTableIndex.from_documents(documents=documents)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 17,
-   "id": "61f48870-65d2-4b23-b57e-79082ecb4ab2",
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "start token ct: 0\n",
-      "> Starting query: What did the author do after his time at Y Combinator?\n",
-      "query keywords: ['author', 'did', 'y', 'combinator', 'after', 'his', 'the', 'what', 'time', 'at', 'do']\n",
-      "Extracted keywords: ['combinator']\n",
-      "> Querying with idx: 3483810247393006047: of 2016 we moved to England. We wanted our kids...\n",
-      "> Querying with idx: 7597483754542696814: people edit code on our server through the brow...\n",
-      "> Querying with idx: 7572417251450701751: invited about 20 of the 225 groups to interview...\n",
-      "end token ct: 11313\n",
-      "> [query] Total token usage: 11313 tokens\n",
-      "11313\n"
-     ]
-    }
-   ],
-   "source": [
-    "llm_predictor = MockLLMPredictor(max_tokens=256)\n",
-    "service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)\n",
-    "query_engine = index.as_query_engine(service_context=service_context)\n",
-    "response = query_engine.query(\"What did the author do after his time at Y Combinator?\")\n",
-    "print(llm_predictor.last_token_usage)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "0fee4405-05e0-46c2-87bb-64ec63a4c6c1",
-   "metadata": {},
-   "source": [
-    "#### Predicting Usage of GPT List Index Query\n",
-    "\n",
-    "Here we build a real list index over the data, but then predict query usage."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 18,
-   "id": "267f2213-67d1-4241-b73f-f1790661d06b",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index import ListIndex, MockLLMPredictor, SimpleDirectoryReader"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 19,
-   "id": "d553a8b1-7045-4756-9729-df84bd305279",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "documents = SimpleDirectoryReader(\"../paul_graham_essay/data\").load_data()\n",
-    "index = ListIndex.from_documents(documents=documents)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 20,
-   "id": "69c99c68-6a23-48ed-aa41-e7af50fef2f3",
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "start token ct: 0\n",
-      "> Starting query: What did the author do after his time at Y Combinator?\n",
-      "end token ct: 23941\n",
-      "> [query] Total token usage: 23941 tokens\n"
-     ]
-    }
-   ],
-   "source": [
-    "llm_predictor = MockLLMPredictor(max_tokens=256)\n",
-    "service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)\n",
-    "query_engine = index.as_query_engine(service_context=service_context)\n",
-    "response = query_engine.query(\"What did the author do after his time at Y Combinator?\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 21,
-   "id": "e8422c5c-af68-4138-a8dd-f6e8d7208c4c",
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "23941\n"
-     ]
-    }
-   ],
-   "source": [
-    "print(llm_predictor.last_token_usage)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "1e19cf61-6d6a-4dfa-af78-1ce184f41c6c",
-   "metadata": {},
-   "source": [
-    "## Using MockEmbedding"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "106d86bf-7725-40bc-84ba-4f273493d3f6",
-   "metadata": {},
-   "source": [
-    "#### Predicting Usage of GPT Simple Vector Index"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 3,
-   "id": "9baf0fe7-2c11-4233-a930-4e593433ba84",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index import (\n",
-    "    VectorStoreIndex,\n",
-    "    MockLLMPredictor,\n",
-    "    MockEmbedding,\n",
-    "    SimpleDirectoryReader,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "id": "97023361-fa47-4008-b8d7-e66d60c5b263",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "documents = SimpleDirectoryReader(\"../paul_graham_essay/data\").load_data()\n",
-    "index = VectorStoreIndex.from_documents(documents=documents)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 7,
-   "id": "63ebe021-2b9c-4024-95f8-56cd9e7e7c47",
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "> [query] Total LLM token usage: 4374 tokens\n",
-      "> [query] Total embedding token usage: 14 tokens\n"
-     ]
-    }
-   ],
-   "source": [
-    "llm_predictor = MockLLMPredictor(max_tokens=256)\n",
-    "embed_model = MockEmbedding(embed_dim=1536)\n",
-    "service_context = ServiceContext.from_defaults(\n",
-    "    llm_predictor=llm_predictor, embed_model=embed_model\n",
-    ")\n",
-    "query_engine = index.as_query_engine(\n",
-    "    service_context=service_context,\n",
-    ")\n",
-    "response = query_engine.query(\n",
-    "    \"What did the author do after his time at Y Combinator?\",\n",
-    ")"
-   ]
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "gpt_retrieve_venv",
-   "language": "python",
-   "name": "gpt_retrieve_venv"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.16"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 5
-}
diff --git a/docs/examples/callbacks/AimCallback.ipynb b/docs/examples/callbacks/AimCallback.ipynb
index ef76e0cfb5..eb9462d9d2 100644
--- a/docs/examples/callbacks/AimCallback.ipynb
+++ b/docs/examples/callbacks/AimCallback.ipynb
@@ -6,7 +6,7 @@
             "id": "fedcd46b",
             "metadata": {},
             "source": [
-                "# AimCallback Demo\n",
+                "# Aim Callback\n",
                 "\n",
                 "Aim is an easy-to-use & supercharged open-source AI metadata tracker it logs all your AI metadata (experiments, prompts, etc) enables a UI to compare & observe them and SDK to query them programmatically. For more please see the [Github page](https://github.com/aimhubio/aim).\n",
                 "\n",
diff --git a/docs/examples/callbacks/LlamaDebugHandler.ipynb b/docs/examples/callbacks/LlamaDebugHandler.ipynb
index 296882231d..ca0311f127 100644
--- a/docs/examples/callbacks/LlamaDebugHandler.ipynb
+++ b/docs/examples/callbacks/LlamaDebugHandler.ipynb
@@ -6,7 +6,7 @@
    "id": "fedcd46b",
    "metadata": {},
    "source": [
-    "# Llama Debug Handler Demo\n",
+    "# Llama Debug Handler\n",
     "\n",
     "Here we showcase the capabilities of our LlamaDebugHandler in logging events as we run queries\n",
     "within LlamaIndex.\n",
diff --git a/docs/examples/callbacks/TokenCountingHandler.ipynb b/docs/examples/callbacks/TokenCountingHandler.ipynb
index f025526060..ce2c74ed15 100644
--- a/docs/examples/callbacks/TokenCountingHandler.ipynb
+++ b/docs/examples/callbacks/TokenCountingHandler.ipynb
@@ -5,7 +5,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# TokenCountingHandler - Demo Usage\n",
+    "# Token Counting Handler\n",
     "\n",
     "This notebook walks through how to use the TokenCountingHandler and how it can be used to track your prompt, completion, and embedding token usage over time."
    ]
diff --git a/docs/examples/callbacks/WandbCallbackHandler.ipynb b/docs/examples/callbacks/WandbCallbackHandler.ipynb
index 187fda29c3..5344f64d60 100644
--- a/docs/examples/callbacks/WandbCallbackHandler.ipynb
+++ b/docs/examples/callbacks/WandbCallbackHandler.ipynb
@@ -6,7 +6,7 @@
    "id": "c0d8b66c",
    "metadata": {},
    "source": [
-    "# WandbCallbackHandler Demo\n",
+    "# Wandb Callback Handler\n",
     "\n",
     "[Weights & Biases Prompts](https://docs.wandb.ai/guides/prompts) is a suite of LLMOps tools built for the development of LLM-powered applications.\n",
     "\n",
@@ -73,7 +73,7 @@
    "id": "e6feb252",
    "metadata": {},
    "source": [
-    "# Setup LLM"
+    "## Setup LLM"
    ]
   },
   {
@@ -92,7 +92,7 @@
    "id": "8790f4c7",
    "metadata": {},
    "source": [
-    "# W&B Callback Manager Setup"
+    "## W&B Callback Manager Setup"
    ]
   },
   {
@@ -142,7 +142,7 @@
    "id": "a4a7c101",
    "metadata": {},
    "source": [
-    "# 1. Indexing"
+    "## 1. Indexing"
    ]
   },
   {
@@ -197,7 +197,7 @@
    "id": "0a948efc",
    "metadata": {},
    "source": [
-    "## 1.1 Persist Index as W&B Artifacts"
+    "### 1.1 Persist Index as W&B Artifacts"
    ]
   },
   {
@@ -224,7 +224,7 @@
    "id": "7ed156a6",
    "metadata": {},
    "source": [
-    "## 1.2 Download Index from W&B Artifacts"
+    "### 1.2 Download Index from W&B Artifacts"
    ]
   },
   {
@@ -265,7 +265,7 @@
    "id": "ae4de4a9",
    "metadata": {},
    "source": [
-    "# 2. Query Over Index"
+    "## 2. Query Over Index"
    ]
   },
   {
@@ -315,7 +315,7 @@
    "id": "d7250272",
    "metadata": {},
    "source": [
-    "# 3. Build Complex Indices"
+    "## 3. Build Complex Indices"
    ]
   },
   {
@@ -454,7 +454,7 @@
    "id": "60aa7e5f",
    "metadata": {},
    "source": [
-    "## 3.1. Query Over Graph Index"
+    "### 3.1. Query Over Graph Index"
    ]
   },
   {
@@ -666,7 +666,7 @@
    "id": "c49ff101",
    "metadata": {},
    "source": [
-    "### Close W&B Callback Handler\n",
+    "## Close W&B Callback Handler\n",
     "\n",
     "When we are done tracking our events we can close the wandb run."
    ]
diff --git a/docs/examples/customization/llms/SimpleIndexDemo-Huggingface_camel.ipynb b/docs/examples/customization/llms/SimpleIndexDemo-Huggingface_camel.ipynb
index 2df98ae955..a91fb6fdd0 100644
--- a/docs/examples/customization/llms/SimpleIndexDemo-Huggingface_camel.ipynb
+++ b/docs/examples/customization/llms/SimpleIndexDemo-Huggingface_camel.ipynb
@@ -108,8 +108,7 @@
     "llm = HuggingFaceLLM(\n",
     "    context_window=2048,\n",
     "    max_new_tokens=256,\n",
-    "    temperature=0.25,\n",
-    "    do_sample=False,\n",
+    "    generate_kwargs={\"temperature\": 0.25, \"do_sample\": False},\n",
     "    query_wrapper_prompt=query_wrapper_prompt,\n",
     "    tokenizer_name=\"Writer/camel-5b-hf\",\n",
     "    model_name=\"Writer/camel-5b-hf\",\n",
diff --git a/docs/examples/customization/llms/SimpleIndexDemo-Huggingface_stablelm.ipynb b/docs/examples/customization/llms/SimpleIndexDemo-Huggingface_stablelm.ipynb
index 324a66a6e6..77955e9da5 100644
--- a/docs/examples/customization/llms/SimpleIndexDemo-Huggingface_stablelm.ipynb
+++ b/docs/examples/customization/llms/SimpleIndexDemo-Huggingface_stablelm.ipynb
@@ -110,8 +110,7 @@
     "llm = HuggingFaceLLM(\n",
     "    context_window=4096,\n",
     "    max_new_tokens=256,\n",
-    "    temperature=0.7,\n",
-    "    do_sample=False,\n",
+    "    generate_kwargs={\"temperature\": 0.7, \"do_sample\": False},\n",
     "    system_prompt=system_prompt,\n",
     "    query_wrapper_prompt=query_wrapper_prompt,\n",
     "    tokenizer_name=\"StabilityAI/stablelm-tuned-alpha-3b\",\n",
diff --git a/docs/examples/customization/prompts/chat_prompts.ipynb b/docs/examples/customization/prompts/chat_prompts.ipynb
new file mode 100644
index 0000000000..eae5407cc8
--- /dev/null
+++ b/docs/examples/customization/prompts/chat_prompts.ipynb
@@ -0,0 +1,199 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Chat Prompts Customization"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Prompt Setup\n",
+    "\n",
+    "Below, we take the default prompts and customize them to always answer, even if the context is not helpful."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.prompts.chat import (\n",
+    "    ChatPromptTemplate,\n",
+    "    HumanMessagePromptTemplate,\n",
+    "    SystemMessagePromptTemplate,\n",
+    ")\n",
+    "from llama_index.prompts import Prompt\n",
+    "\n",
+    "chat_text_qa_msgs = [\n",
+    "    SystemMessagePromptTemplate.from_template(\n",
+    "        \"Always answer the question, even if the context isn't helpful.\"\n",
+    "    ),\n",
+    "    HumanMessagePromptTemplate.from_template(\n",
+    "        \"Context information is below.\\n\"\n",
+    "        \"---------------------\\n\"\n",
+    "        \"{context_str}\\n\"\n",
+    "        \"---------------------\\n\"\n",
+    "        \"Given the context information and not prior knowledge, \"\n",
+    "        \"answer the question: {query_str}\\n\"\n",
+    "    ),\n",
+    "]\n",
+    "chat_text_qa_msgs_lc = ChatPromptTemplate.from_messages(chat_text_qa_msgs)\n",
+    "text_qa_template = Prompt.from_langchain_prompt(chat_text_qa_msgs_lc)\n",
+    "\n",
+    "# Refine Prompt\n",
+    "chat_refine_msgs = [\n",
+    "    SystemMessagePromptTemplate.from_template(\n",
+    "        \"Always answer the question, even if the context isn't helpful.\"\n",
+    "    ),\n",
+    "    HumanMessagePromptTemplate.from_template(\n",
+    "        \"We have the opportunity to refine the original answer \"\n",
+    "        \"(only if needed) with some more context below.\\n\"\n",
+    "        \"------------\\n\"\n",
+    "        \"{context_msg}\\n\"\n",
+    "        \"------------\\n\"\n",
+    "        \"Given the new context, refine the original answer to better \"\n",
+    "        \"answer the question: {query_str}. \"\n",
+    "        \"If the context isn't useful, output the original answer again.\\n\"\n",
+    "        \"Original Answer: {existing_answer}\"\n",
+    "    ),\n",
+    "]\n",
+    "\n",
+    "\n",
+    "chat_refine_msgs_lc = ChatPromptTemplate.from_messages(chat_refine_msgs)\n",
+    "refine_template = Prompt.from_langchain_prompt(chat_refine_msgs_lc)"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Using the Prompts\n",
+    "\n",
+    "Now, we use the prompts in an index query!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import openai\n",
+    "import os\n",
+    "\n",
+    "os.environ[\"OPENAI_API_KEY\"] = \"YOUR_API_KEY\"\n",
+    "openai.api_key = os.environ[\"OPENAI_API_KEY\"]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n",
+    "from llama_index.llms import OpenAI\n",
+    "\n",
+    "documents = SimpleDirectoryReader(\"../../data/paul_graham/\").load_data()\n",
+    "\n",
+    "# Create an index using a chat model, so that we can use the chat prompts!\n",
+    "service_context = ServiceContext.from_defaults(\n",
+    "    llm=OpenAI(model=\"gpt-3.5-turbo\", temperature=0.0)\n",
+    ")\n",
+    "\n",
+    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Before Adding Templates"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Based on the given context information, there is no mention of Joe Biden. Therefore, it is not possible to determine who Joe Biden is based on this information alone.\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(index.as_query_engine().query(\"Who is Joe Biden?\"))"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### After Adding Templates"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Joe Biden is a politician who served as the 46th President of the United States.\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(\n",
+    "    index.as_query_engine(\n",
+    "        text_qa_template=text_qa_template, refine_template=refine_template\n",
+    "    ).query(\"Who is Joe Biden?\")\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "venv",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.6"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/examples/customization/prompts/completion_prompts.ipynb b/docs/examples/customization/prompts/completion_prompts.ipynb
new file mode 100644
index 0000000000..c900a7184e
--- /dev/null
+++ b/docs/examples/customization/prompts/completion_prompts.ipynb
@@ -0,0 +1,175 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Completion Prompts Customization"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Prompt Setup\n",
+    "\n",
+    "Below, we take the default prompts and customize them to always answer, even if the context is not helpful."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 28,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index.prompts import Prompt\n",
+    "\n",
+    "text_qa_template_str = (\n",
+    "    \"Context information is below.\\n\"\n",
+    "    \"---------------------\\n\"\n",
+    "    \"{context_str}\\n\"\n",
+    "    \"---------------------\\n\"\n",
+    "    \"Using both the context information and also using your own knowledge, \"\n",
+    "    \"answer the question: {query_str}\\n\"\n",
+    "    \"If the context isn't helpful, you can also answer the question on your own.\\n\"\n",
+    ")\n",
+    "text_qa_template = Prompt(text_qa_template_str)\n",
+    "\n",
+    "refine_template_str = (\n",
+    "    \"The original question is as follows: {query_str}\\n\"\n",
+    "    \"We have provided an existing answer: {existing_answer}\\n\"\n",
+    "    \"We have the opportunity to refine the existing answer \"\n",
+    "    \"(only if needed) with some more context below.\\n\"\n",
+    "    \"------------\\n\"\n",
+    "    \"{context_msg}\\n\"\n",
+    "    \"------------\\n\"\n",
+    "    \"Using both the new context and your own knowledge, update or repeat the existing answer.\\n\"\n",
+    ")\n",
+    "refine_template = Prompt(refine_template_str)"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Using the Prompts\n",
+    "\n",
+    "Now, we use the prompts in an index query!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 29,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import openai\n",
+    "import os\n",
+    "\n",
+    "os.environ[\"OPENAI_API_KEY\"] = \"YOUR_API_KEY\"\n",
+    "openai.api_key = os.environ[\"OPENAI_API_KEY\"]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 30,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index import VectorStoreIndex, SimpleDirectoryReader\n",
+    "\n",
+    "documents = SimpleDirectoryReader(\"../../data/paul_graham/\").load_data()\n",
+    "\n",
+    "index = VectorStoreIndex.from_documents(documents)"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Before Adding Templates"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 31,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "Joe Biden is not mentioned in the context information.\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(index.as_query_engine().query(\"Who is Joe Biden?\"))"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### After Adding Templates"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 33,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "Joe Biden is the 46th President of the United States. He was elected in 2020 and is the first Democratic president since Barack Obama. He previously served as Vice President under Obama from 2009 to 2017.\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(\n",
+    "    index.as_query_engine(\n",
+    "        text_qa_template=text_qa_template, refine_template=refine_template\n",
+    "    ).query(\"Who is Joe Biden?\")\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "venv",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.6"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/examples/embeddings/Langchain.ipynb b/docs/examples/embeddings/Langchain.ipynb
new file mode 100644
index 0000000000..b3ce7b10e2
--- /dev/null
+++ b/docs/examples/embeddings/Langchain.ipynb
@@ -0,0 +1,40 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Langchain Embeddings"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.embeddings import HuggingFaceEmbeddings\n",
+    "from llama_index.embeddings import LangchainEmbedding\n",
+    "from llama_index import ServiceContext, set_global_service_context\n",
+    "\n",
+    "embed_model = LangchainEmbedding(\n",
+    "    HuggingFaceEmbeddings(\"sentence-transformers/all-mpnet-base-v2\")\n",
+    ")\n",
+    "\n",
+    "service_context = ServiceContext.from_defaults(embed_model=embed_model)\n",
+    "\n",
+    "# optionally set a global service context\n",
+    "set_global_service_context(service_context)"
+   ]
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/examples/embeddings/OpenAI.ipynb b/docs/examples/embeddings/OpenAI.ipynb
new file mode 100644
index 0000000000..29b4c8b492
--- /dev/null
+++ b/docs/examples/embeddings/OpenAI.ipynb
@@ -0,0 +1,50 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# OpenAI Embeddings"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import openai\n",
+    "\n",
+    "os.environ[\"OPENAI_API_KEY\"] = \"YOUR_API_KEY\"\n",
+    "openai.api_key = os.environ[\"OPENAI_API_KEY\"]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index.embeddings import OpenAIEmbedding\n",
+    "from llama_index import ServiceContext, set_global_service_context\n",
+    "\n",
+    "embed_model = OpenAIEmbedding(embed_batch_size=10)\n",
+    "\n",
+    "service_context = ServiceContext.from_defaults(embed_model=embed_model)\n",
+    "\n",
+    "# optionally set a global service context\n",
+    "set_global_service_context(service_context)"
+   ]
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/examples/embeddings/custom_embeddings.ipynb b/docs/examples/embeddings/custom_embeddings.ipynb
new file mode 100644
index 0000000000..2bc869a43c
--- /dev/null
+++ b/docs/examples/embeddings/custom_embeddings.ipynb
@@ -0,0 +1,184 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Custom Embeddings\n",
+    "LlamaIndex supports embeddings from OpenAI, Azure, and Langchain. But if this isn't enough, you can also implement any embeddings model!\n",
+    "\n",
+    "The example below uses Instructor Embeddings ([install/setup details here](https://huggingface.co/hkunlp/instructor-large)), and implements a custom embeddings class. Instructor embeddings work by providing text, as well as \"instructions\" on the domain of the text to embed. This is helpful when embedding text from a very specific and specialized topic.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Install dependencies\n",
+    "# !pip install InstructorEmbedding torch transformers sentence-transformers"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 23,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import openai\n",
+    "import os\n",
+    "\n",
+    "os.environ[\"OPENAI_API_KEY\"] = \"YOUR_API_KEY\"\n",
+    "openai.api_key = os.environ[\"OPENAI_API_KEY\"]"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Custom Embeddings Implementation"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 29,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from typing import Any, List\n",
+    "from InstructorEmbedding import INSTRUCTOR\n",
+    "from llama_index.embeddings.base import BaseEmbedding\n",
+    "\n",
+    "\n",
+    "class InstructorEmbeddings(BaseEmbedding):\n",
+    "    def __init__(\n",
+    "        self,\n",
+    "        instructor_model_name: str = \"hkunlp/instructor-large\",\n",
+    "        instruction: str = \"Represent a document for semantic search:\",\n",
+    "        **kwargs: Any,\n",
+    "    ) -> None:\n",
+    "        self._model = INSTRUCTOR(instructor_model_name)\n",
+    "        self._instruction = instruction\n",
+    "        super().__init__(**kwargs)\n",
+    "\n",
+    "    def _get_query_embedding(self, query: str) -> List[float]:\n",
+    "        embeddings = self._model.encode([[self._instruction, query]])\n",
+    "        return embeddings[0]\n",
+    "\n",
+    "    def _get_text_embedding(self, text: str) -> List[float]:\n",
+    "        embeddings = self._model.encode([[self._instruction, text]])\n",
+    "        return embeddings[0]\n",
+    "\n",
+    "    def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:\n",
+    "        embeddings = self._model.encode([[self._instruction, text] for text in texts])\n",
+    "        return embeddings"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Usage Example"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 30,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llama_index import ServiceContext, SimpleDirectoryReader, VectorStoreIndex"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 31,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "documents = SimpleDirectoryReader(\"../../data/paul_graham/\").load_data()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 32,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "load INSTRUCTOR_Transformer\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "max_seq_length  512\n"
+     ]
+    }
+   ],
+   "source": [
+    "service_context = ServiceContext.from_defaults(\n",
+    "    embed_model=InstructorEmbeddings(embed_batch_size=2), chunk_size=512\n",
+    ")\n",
+    "\n",
+    "# if running for the first time, will download model weights first!\n",
+    "index = VectorStoreIndex.from_documents(documents, service_context=service_context)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 33,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "The author grew up writing short stories, programming on an IBM 1401, writing essays on various topics, working on spam filters, painting, cooking for groups, and buying a building in Cambridge.\n"
+     ]
+    }
+   ],
+   "source": [
+    "response = index.as_query_engine().query(\"What did the author do growing up?\")\n",
+    "print(response)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "llama-index",
+   "language": "python",
+   "name": "llama-index"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.0"
+  },
+  "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/docs/examples/evaluation/QuestionGeneration.ipynb b/docs/examples/evaluation/QuestionGeneration.ipynb
index e054ed2a8e..d535f8ca80 100644
--- a/docs/examples/evaluation/QuestionGeneration.ipynb
+++ b/docs/examples/evaluation/QuestionGeneration.ipynb
@@ -6,7 +6,9 @@
    "id": "f3f797ad",
    "metadata": {},
    "source": [
-    "# Question Generation"
+    "# QuestionGeneration\n",
+    "\n",
+    "This notebook walks through the process of generating a list of questions that could be asked about your data. This is useful for setting up an evaluation pipeline using the `ResponseEvaluator` and `QueryResponseEvaluator` evaluation tools."
    ]
   },
   {
diff --git a/docs/examples/evaluation/TestNYC-Evaluation-Query.ipynb b/docs/examples/evaluation/TestNYC-Evaluation-Query.ipynb
index bac4c2ae31..7afd23bda9 100644
--- a/docs/examples/evaluation/TestNYC-Evaluation-Query.ipynb
+++ b/docs/examples/evaluation/TestNYC-Evaluation-Query.ipynb
@@ -6,7 +6,9 @@
    "id": "0c266183",
    "metadata": {},
    "source": [
-    "# Query Response Evaluator"
+    "# QueryResponseEvaluator\n",
+    "\n",
+    "This notebook uses the `QueryResponseEvaluator` to measure if the response + source nodes match the query. This is useful for measuring if the query was actually answered by the response."
    ]
   },
   {
diff --git a/docs/examples/evaluation/TestNYC-Evaluation.ipynb b/docs/examples/evaluation/TestNYC-Evaluation.ipynb
index e4b6e3dd60..4659e0ebe4 100644
--- a/docs/examples/evaluation/TestNYC-Evaluation.ipynb
+++ b/docs/examples/evaluation/TestNYC-Evaluation.ipynb
@@ -6,7 +6,9 @@
    "id": "de6537c4",
    "metadata": {},
    "source": [
-    "# Response Evaluator"
+    "# ResponseEvaluator\n",
+    "\n",
+    "This notebook uses the `ResponseEvaluator` module to measure if the response from a query engine matches any source nodes. This is useful for measuring if the response was hallucinated."
    ]
   },
   {
diff --git a/docs/examples/llm/palm.ipynb b/docs/examples/llm/palm.ipynb
new file mode 100644
index 0000000000..883f9968cc
--- /dev/null
+++ b/docs/examples/llm/palm.ipynb
@@ -0,0 +1,189 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "368686b4-f487-4dd4-aeff-37823976529d",
+   "metadata": {},
+   "source": [
+    "# PaLM \n",
+    "\n",
+    "In this short notebook, we show how to use the PaLM LLM from Google in LlamaIndex: https://ai.google/discover/palm2/.\n",
+    "\n",
+    "We use the `text-bison-001` model by default."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "e7927630-0044-41fb-a8a6-8dc3d2adb608",
+   "metadata": {},
+   "source": [
+    "### Setup"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "e09939e2-57be-4eba-9bde-2a9409c1600f",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip available: \u001b[0m\u001b[31;49m22.3.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m23.1.2\u001b[0m\n",
+      "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpip install --upgrade pip\u001b[0m\n"
+     ]
+    }
+   ],
+   "source": [
+    "!pip install -q google-generativeai"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "id": "429e80b3-58aa-4804-8877-4573faed52a6",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "import pprint\n",
+    "import google.generativeai as palm"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "id": "2014ee79-52a3-4521-bb36-3e96f1f9405c",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "palm_api_key = \"\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "id": "059e6fe0-5878-4f13-940b-be5bf9fa1fec",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "palm.configure(api_key=palm_api_key)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "e8594574-6c9d-422a-bd2e-1280cb208a04",
+   "metadata": {},
+   "source": [
+    "### Define Model"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "id": "6fa0ec4f-03ff-4e28-957f-b4b99a0faa20",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "models/text-bison-001\n"
+     ]
+    }
+   ],
+   "source": [
+    "models = [\n",
+    "    m for m in palm.list_models() if \"generateText\" in m.supported_generation_methods\n",
+    "]\n",
+    "model = models[0].name\n",
+    "print(model)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "5e2e6a78-7e5d-4915-bcbf-6087edb30276",
+   "metadata": {},
+   "source": [
+    "### Start using our `PaLM` LLM abstraction!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "43bac120-ff73-49b8-8d72-83f43091d169",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "from llama_index.llms.palm import PaLM\n",
+    "\n",
+    "model = PaLM(api_key=palm_api_key)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 25,
+   "id": "5cfaf34c-0348-415e-98bb-83f782d64fe9",
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "CompletionResponse(text='1 house has 3 cats * 4 mittens / cat = 12 mittens.\\n3 houses have 12 mittens / house * 3 houses = 36 mittens.\\n1 hat needs 4m of yarn. 36 hats need 4m / hat * 36 hats = 144m of yarn.\\n1 mitten needs 7m of yarn. 36 mittens need 7m / mitten * 36 mittens = 252m of yarn.\\nIn total 144m of yarn was needed for hats and 252m of yarn was needed for mittens, so 144m + 252m = 396m of yarn was needed.\\n\\nThe answer: 396', additional_kwargs={}, raw={'output': '1 house has 3 cats * 4 mittens / cat = 12 mittens.\\n3 houses have 12 mittens / house * 3 houses = 36 mittens.\\n1 hat needs 4m of yarn. 36 hats need 4m / hat * 36 hats = 144m of yarn.\\n1 mitten needs 7m of yarn. 36 mittens need 7m / mitten * 36 mittens = 252m of yarn.\\nIn total 144m of yarn was needed for hats and 252m of yarn was needed for mittens, so 144m + 252m = 396m of yarn was needed.\\n\\nThe answer: 396', 'safety_ratings': [{'category': <HarmCategory.HARM_CATEGORY_DEROGATORY: 1>, 'probability': <HarmProbability.NEGLIGIBLE: 1>}, {'category': <HarmCategory.HARM_CATEGORY_TOXICITY: 2>, 'probability': <HarmProbability.NEGLIGIBLE: 1>}, {'category': <HarmCategory.HARM_CATEGORY_VIOLENCE: 3>, 'probability': <HarmProbability.NEGLIGIBLE: 1>}, {'category': <HarmCategory.HARM_CATEGORY_SEXUAL: 4>, 'probability': <HarmProbability.NEGLIGIBLE: 1>}, {'category': <HarmCategory.HARM_CATEGORY_MEDICAL: 5>, 'probability': <HarmProbability.NEGLIGIBLE: 1>}, {'category': <HarmCategory.HARM_CATEGORY_DANGEROUS: 6>, 'probability': <HarmProbability.NEGLIGIBLE: 1>}]}, delta=None)"
+      ]
+     },
+     "execution_count": 25,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "model.complete(prompt)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "7b059409-cd9d-4651-979c-03b3943e94af",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "llama_index_v2",
+   "language": "python",
+   "name": "llama_index_v2"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/examples/response_builder/refine.ipynb b/docs/examples/response_synthesizers/refine.ipynb
similarity index 97%
rename from docs/examples/response_builder/refine.ipynb
rename to docs/examples/response_synthesizers/refine.ipynb
index 6f72bc567f..4d871c6139 100644
--- a/docs/examples/response_builder/refine.ipynb
+++ b/docs/examples/response_synthesizers/refine.ipynb
@@ -1,6 +1,7 @@
 {
  "cells": [
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "540ff471-dcea-4b3e-9c0c-a3173f1c640e",
    "metadata": {},
@@ -9,6 +10,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "158b08a8-32d3-4397-ad37-75870416226b",
    "metadata": {},
@@ -76,6 +78,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "efed56ee-fcd3-439c-a1b2-53c643f15c8e",
    "metadata": {},
@@ -120,7 +123,7 @@
    },
    "outputs": [],
    "source": [
-    "from llama_index.indices.response import Refine\n",
+    "from llama_index.response_synthesizers import Refine\n",
     "\n",
     "summarizer = Refine(service_context=service_context, verbose=True)"
    ]
diff --git a/docs/examples/response_builder/tree_summarize.ipynb b/docs/examples/response_synthesizers/tree_summarize.ipynb
similarity index 96%
rename from docs/examples/response_builder/tree_summarize.ipynb
rename to docs/examples/response_synthesizers/tree_summarize.ipynb
index b408ed468e..96085dec51 100644
--- a/docs/examples/response_builder/tree_summarize.ipynb
+++ b/docs/examples/response_synthesizers/tree_summarize.ipynb
@@ -1,6 +1,7 @@
 {
  "cells": [
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "540ff471-dcea-4b3e-9c0c-a3173f1c640e",
    "metadata": {},
@@ -9,6 +10,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "158b08a8-32d3-4397-ad37-75870416226b",
    "metadata": {},
@@ -67,6 +69,7 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "efed56ee-fcd3-439c-a1b2-53c643f15c8e",
    "metadata": {},
@@ -83,7 +86,7 @@
    },
    "outputs": [],
    "source": [
-    "from llama_index.indices.response import TreeSummarize"
+    "from llama_index.response_synthesizers import TreeSummarize"
    ]
   },
   {
diff --git a/docs/getting_started/FAQ.md b/docs/getting_started/FAQ.md
new file mode 100644
index 0000000000..32cce9075d
--- /dev/null
+++ b/docs/getting_started/FAQ.md
@@ -0,0 +1 @@
+# FAQ
\ No newline at end of file
diff --git a/docs/getting_started/concepts.md b/docs/getting_started/concepts.md
new file mode 100644
index 0000000000..f9dd978e92
--- /dev/null
+++ b/docs/getting_started/concepts.md
@@ -0,0 +1,83 @@
+# High-Level Concepts
+
+```{tip}
+If you haven't, [install](/getting_started/installation.md) and complete the [starter tutorial](/getting_started/starter_example.md) before you read this. It will make a lot more sense!
+```
+
+LlamaIndex helps you build LLM-powered applications (e.g. Q&A, chatbot, and agents) over custom data.
+
+In this high-level concepts guide, you will learn:
+* the retrieval augmented generation (RAG) paradigm for combining LLM with custom data,
+* key concepts and modules in LlamaIndex for composing your own RAG pipeline.
+
+## Retrieval Augmented Generation (RAG)
+Retrieval augmented generation (RAG) is a paradigm for augmenting LLM with custom data.
+It generally consists of two stages: 
+1) **indexing stage**: preparing a knowledge base, and
+2) **querying stage**: retrieving relevant context from the knowledge base to assist the LLM in responding to a question
+
+![](/_static/getting_started/rag.jpg)
+
+
+LlamaIndex provides the essential toolkit for making both steps super easy.
+Let's explore each stage in detail.
+
+### Indexing Stage
+LlamaIndex helps you prepare the knowledge base with a suite of data connectors and indexes.
+![](/_static/getting_started/indexing.jpg) 
+
+[**Data Connectors**](/core_modules/data_modules/connector/root.md):
+A data connector (i.e. `Reader`) ingests data from different data sources and data formats into a simple `Document` representation (text and simple metadata).
+
+[**Documents / Nodes**](/core_modules/data_modules/documents_and_nodes/root.md): A `Document` is a generic container around any data source - for instance, a PDF, an API output, or retrieved data from a database. A `Node` is the atomic unit of data in LlamaIndex and represents a "chunk" of a source `Document`. It's a rich representation that includes metadata and relationships (to other nodes) to enable accurate and expressive retrieval operations.
+
+[**Data Indexes**](/core_modules/data_modules/index/root.md): 
+Once you've ingested your data, LlamaIndex helps you index data into a format that's easy to retrieve.
+Under the hood, LlamaIndex parses the raw documents into intermediate representations, calculates vector embeddings, and infers metadata, etc.
+The most commonly used index is the [VectorStoreIndex](/core_modules/data_modules/index/vector_store_guide.ipynb).
+
+### Querying Stage
+In the querying stage, the RAG pipeline retrieves the most relevant context given a user query,
+and passes that to the LLM (along with the query) to synthesize a response.
+This gives the LLM up-to-date knowledge that is not in its original training data
+(also reducing hallucination).
+The key challenge in the querying stage is retrieval, orchestration, and reasoning over (potentially many) knowledge bases.
+
+LlamaIndex provides composable modules that help you build and integrate RAG pipelines for Q&A (query engine), chatbot (chat engine), or as part of an agent.
+These building blocks can be customized to reflect ranking preferences, as well as composed to reason over multiple knowledge bases in a structured way.
+
+![](/_static/getting_started/querying.jpg)
+
+#### Building Blocks
+[**Retrievers**](/core_modules/query_modules/retriever/root.md): 
+A retriever defines how to efficiently retrieve relevant context from a knowledge base (i.e. index) when given a query.
+The specific retrieval logic differs for different indices, the most popular being dense retrieval against a vector index.
+
+[**Node Postprocessors**](/core_modules/query_modules/node_postprocessors/root.md):
+A node postprocessor takes in a set of nodes, then applies transformation, filtering, or re-ranking logic to them. 
+
+[**Response Synthesizers**](/core_modules/query_modules/response_synthesizers/root.md):
+A response synthesizer generates a response from an LLM, using a user query and a given set of retrieved text chunks.  
+
+#### Pipelines
+
+[**Query Engines**](/core_modules/query_modules/query_engine/root.md):
+A query engine is an end-to-end pipeline that allows you to ask questions over your data.
+It takes in a natural language query, and returns a response, along with reference context retrieved and passed to the LLM.
+
+
+[**Chat Engines**](/core_modules/query_modules/chat_engines/root.md): 
+A chat engine is an end-to-end pipeline for having a conversation with your data
+(multiple back-and-forth instead of a single question & answer).
+
+[**Agents**](/core_modules/query_modules/agent/root.md): 
+An agent is an automated decision maker (powered by an LLM) that interacts with the world via a set of tools.
+Agents may be used in the same fashion as query engines or chat engines. 
+The main distinction is that an agent dynamically decides the best sequence of actions, instead of following a predetermined logic.
+This gives it additional flexibility to tackle more complex tasks.
+
+```{admonition} Next Steps
+* tell me how to [customize things](/getting_started/customization.rst).
+* curious about a specific module? Check out the module guides 👈
+* have a use case in mind? Check out the [end-to-end tutorials](/end_to_end_tutorials/use_cases.md)
+```
\ No newline at end of file
diff --git a/docs/getting_started/customization.rst b/docs/getting_started/customization.rst
new file mode 100644
index 0000000000..7222cc7425
--- /dev/null
+++ b/docs/getting_started/customization.rst
@@ -0,0 +1,181 @@
+Customization Tutorial
+======================
+.. tip::
+    If you haven't, `install <installation.html>`_, complete the `starter tutorial <starter_example.html>`_, and learn the `high-level concepts <concepts.html>`_ before you read this. It will make a lot more sense!
+
+In this tutorial, we show the most common customizations with the `starter example <starter_example.html>`_:
+
+.. code-block:: python
+
+    from llama_index import VectorStoreIndex, SimpleDirectoryReader
+
+    documents = SimpleDirectoryReader('data').load_data()
+    index = VectorStoreIndex.from_documents(documents)
+    query_engine = index.as_query_engine()
+    response = query_engine.query("What did the author do growing up?")
+    print(response)
+
+-----------------
+
+**"I want to parse my documents into smaller chunks"**
+
+.. code-block:: python
+
+    from llama_index import ServiceContext
+    service_context = ServiceContext.from_defaults(chunk_size=1000)
+
+.. tip::
+    `ServiceContext` is a bundle of services and configurations used across a LlamaIndex pipeline.
+    Learn more `here <../core_modules/supporting_modules/service_context.html>`_.
+
+.. code-block:: python
+    :emphasize-lines: 4
+
+    from llama_index import VectorStoreIndex, SimpleDirectoryReader
+
+    documents = SimpleDirectoryReader('data').load_data()
+    index = VectorStoreIndex.from_documents(documents, service_context=service_context)
+    query_engine = index.as_query_engine()
+    response = query_engine.query("What did the author do growing up?")
+    print(response)
+
+-----------------
+
+**"I want to use a different vector store"**
+
+.. code-block:: python
+
+    import chromadb
+    from llama_index.vector_stores import ChromaVectorStore
+    from llama_index import StorageContext
+
+    chroma_client = chromadb.Client()
+    chroma_collection = chroma_client.create_collection("quickstart")
+    vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
+    storage_context = StorageContext.from_defaults(vector_store=vector_store)
+
+.. tip::
+    `StorageContext` defines the storage backend for where the documents, embeddings, and indexes are stored.
+    Learn more `here <../core_modules/data_modules/storage/customization.html>`_.
+
+.. code-block:: python
+    :emphasize-lines: 4
+
+    from llama_index import VectorStoreIndex, SimpleDirectoryReader
+
+    documents = SimpleDirectoryReader('data').load_data()
+    index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
+    query_engine = index.as_query_engine()
+    response = query_engine.query("What did the author do growing up?")
+    print(response)
+
+-----------------
+
+**"I want to retrieve more context when I query"**
+
+.. code-block:: python
+    :emphasize-lines: 5
+
+    from llama_index import VectorStoreIndex, SimpleDirectoryReader
+
+    documents = SimpleDirectoryReader('data').load_data()
+    index = VectorStoreIndex.from_documents(documents)
+    query_engine = index.as_query_engine(similarity_top_k=5)
+    response = query_engine.query("What did the author do growing up?")
+    print(response)
+
+.. tip::
+    `as_query_engine` builds a default retriever and query engine on top of the index.
+    You can configure the retriever and query engine by passing in keyword arguments.
+    Here, we configure the retriever to return the top 5 most similar documents (instead of the default of 2).
+    Learn more about vector index `here <../core_modules/data_modules/index/vector_store_guide.html>`_.
+
+-----------------
+
+**"I want to use a different LLM"**
+
+.. code-block:: python
+
+    from llama_index import ServiceContext
+    from llama_index.llms import PaLM
+    service_context = ServiceContext.from_defaults(llm=PaLM())
+
+.. tip::
+    Learn more about customizing LLMs `here <../core_modules/model_modules/llms/usage_custom.html>`_.
+
+.. code-block:: python
+    :emphasize-lines: 5
+
+    from llama_index import VectorStoreIndex, SimpleDirectoryReader
+
+    documents = SimpleDirectoryReader('data').load_data()
+    index = VectorStoreIndex.from_documents(documents)
+    query_engine = index.as_query_engine(service_context=service_context)
+    response = query_engine.query("What did the author do growing up?")
+    print(response)
+
+-----------------
+
+**"I want to use a different response mode"**
+
+
+.. code-block:: python
+    :emphasize-lines: 5
+
+    from llama_index import VectorStoreIndex, SimpleDirectoryReader
+
+    documents = SimpleDirectoryReader('data').load_data()
+    index = VectorStoreIndex.from_documents(documents)
+    query_engine = index.as_query_engine(response_mode='tree_summarize')
+    response = query_engine.query("What did the author do growing up?")
+    print(response)
+
+.. tip::
+    Learn more about query engine usage pattern `here <../core_modules/query_modules/query_engine/usage_pattern.html>`_ and available response modes `here <../core_modules/query_modules/query_engine/response_modes.html>`_.
+
+-----------------
+
+**"I want to stream the response back"**
+
+
+.. code-block:: python
+    :emphasize-lines: 5, 7
+
+    from llama_index import VectorStoreIndex, SimpleDirectoryReader
+
+    documents = SimpleDirectoryReader('data').load_data()
+    index = VectorStoreIndex.from_documents(documents)
+    query_engine = index.as_query_engine(streaming=True)
+    response = query_engine.query("What did the author do growing up?")
+    response.print_response_stream()
+
+.. tip::
+    Learn more about streaming `here <../how_to/customization/streaming.html>`_.
+
+-----------------
+
+**"I want a chatbot instead of Q&A"**
+
+.. code-block:: python
+    :emphasize-lines: 5, 6, 9
+
+    from llama_index import VectorStoreIndex, SimpleDirectoryReader
+
+    documents = SimpleDirectoryReader('data').load_data()
+    index = VectorStoreIndex.from_documents(documents)
+    query_engine = index.as_chat_engine()
+    response = query_engine.chat("What did the author do growing up?")
+    print(response)
+
+    response = query_engine.chat("Oh interesting, tell me more.")
+    print(response)
+
+.. tip::
+    Learn more about chat engine usage pattern `here <../core_modules/query_modules/chat_engines/usage_pattern.html>`_.
+
+-----------------
+
+.. admonition:: Next Steps
+
+    * want a thorough walkthrough of (almost) everything you can configure? Try the `end-to-end tutorial on basic usage pattern <../end_to_end_tutorials/usage_pattern.html>`_.
+    * want more in-depth understanding of specific modules? Check out the module guides 👈
\ No newline at end of file
diff --git a/docs/getting_started/installation.md b/docs/getting_started/installation.md
index 42920ae389..3ea662a7c3 100644
--- a/docs/getting_started/installation.md
+++ b/docs/getting_started/installation.md
@@ -19,6 +19,8 @@ Git clone this repository: `git clone https://github.com/jerryjliu/llama_index.g
 By default, we use the OpenAI GPT-3 `text-davinci-003` model. In order to use this, you must have an OPENAI_API_KEY setup.
 You can register an API key by logging into [OpenAI's page and creating a new API token](https://beta.openai.com/account/api-keys).
 
-You can customize the underlying LLM in the [Custom LLMs How-To](/how_to/customization/custom_llms.md) (courtesy of Langchain). You may
+```{tip}
+You can also [customize the underlying LLM](/core_modules/model_modules/llms/usage_custom.md). You may
 need additional environment keys + tokens setup depending on the LLM provider.
+```
 
diff --git a/docs/getting_started/starter_example.md b/docs/getting_started/starter_example.md
index 10ea65eabc..ede9e09127 100644
--- a/docs/getting_started/starter_example.md
+++ b/docs/getting_started/starter_example.md
@@ -1,6 +1,9 @@
 # Starter Tutorial
 
-Here is a starter example for using LlamaIndex. Make sure you've followed the [installation](installation.md) steps first.
+```{tip}
+Make sure you've followed the [installation](installation.md) steps first.
+```
+Here is a starter example for using LlamaIndex. 
 
 ### Download
 
@@ -84,9 +87,12 @@ storage_context = StorageContext.from_defaults(persist_dir="./storage")
 index = load_index_from_storage(storage_context)
 ```
 
-### Next Steps
 
-That's it! For more information on LlamaIndex features, please check out the numerous "Guides" to the left.
-If you are interested in further exploring how LlamaIndex works, check out our [Primer Guide](/guides/primer.rst).
 
-Additionally, if you would like to play around with Example Notebooks, check out [this link](/reference/example_notebooks.rst).
+```{admonition} Next Steps
+* learn more about the [high-level concepts](/getting_started/concepts.md).
+* tell me how to [customize things](/getting_started/customization.rst).
+* curious about a specific module? check out the guides 👈
+* have a use case in mind? check out the [end-to-end tutorials](/end_to_end_tutorials/use_cases.md)
+```
+
diff --git a/docs/guides/primer.rst b/docs/guides/primer.rst
deleted file mode 100644
index 9b1d2c3eb4..0000000000
--- a/docs/guides/primer.rst
+++ /dev/null
@@ -1,21 +0,0 @@
-A Primer to using LlamaIndex
-============================
-
-At its core, LlamaIndex contains a toolkit designed to easily connect LLM's with your external data.
-The guides below are intended to help you get the most out of LlamaIndex. It gives a high-level overview of the following: 
-
-1. The general usage pattern of LlamaIndex (from data ingestion to data structures, to query interface)
-2. How Each Index Works
-3. Query Interface 
-4. Architecture Overview of LlamaIndex (as of 0.6.0)
-
-
-.. toctree::
-   :maxdepth: 1
-   :caption: General Guides
-
-   primer/usage_pattern.md
-   primer/index_guide.md
-   primer/query_interface.md
-   Architecture Overview <https://medium.com/better-programming/llamaindex-0-6-0-a-new-query-interface-over-your-data-331996d47e89>
-
diff --git a/docs/guides/primer/query_interface.md b/docs/guides/primer/query_interface.md
deleted file mode 100644
index e311e82d91..0000000000
--- a/docs/guides/primer/query_interface.md
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# Query Interface
-Querying an index or a graph involves a three main components:
-
-- **Retrievers**: A retriever class retrieves a set of Nodes from an index given a query.
-- **Response Synthesizer**: This class takes in a set of Nodes and synthesizes an answer given a query.
-- **Query Engine**: This class takes in a query and returns a Response object. It can make use
-   of Retrievers and Response Synthesizer modules under the hood.
-
-![](/_static/query/query_classes.png)
-
-
-## Design Philosophy: Progressive Disclosure of Complexity
-
-Progressive disclosure of complexity is a design philosophy that aims to strike 
-a balance between the needs of beginners and experts. The idea is that you should 
-give users the simplest and most straightforward interface or experience possible 
-when they first encounter a system or product, but then gradually reveal more 
-complexity and advanced features as users become more familiar with the system. 
-This can help prevent users from feeling overwhelmed or intimidated by a system 
-that seems too complex, while still giving experienced users the tools they need 
-to accomplish advanced tasks.
-
-![](/_static/query/disclosure.png)
-
-
-In the case of LlamaIndex, we've tried to balance simplicity and complexity by 
-providing a high-level API that's easy to use out of the box, but also a low-level 
-composition API that gives experienced users the control they need to customize the 
-system to their needs. By doing this, we hope to make LlamaIndex accessible to 
-beginners while still providing the flexibility and power that experienced users need.
-
-## Resources
-
-- The basic query interface over an index is found in our [usage pattern guide](/guides/primer/usage_pattern.md). The guide
-  details how to specify parameters for a retriever/synthesizer/query engine over a 
-  single index structure.
-- A more advanced query interface is found in our [composability guide](/how_to/index/composability.md). The guide
-  describes how to specify a graph over multiple index structures.
-- We also provide a guide to some of our more [advanced components](/how_to/query_engine/advanced/root.md), which can be added 
-  to a retriever or a query engine. See our [query transformations](/how_to/query_engine/advanced/query_transformations.md)
-  and 
-  [second-stage processing](/how_to/query_engine/advanced/second_stage.md) modules. 
-
diff --git a/docs/guides/tutorials.rst b/docs/guides/tutorials.rst
deleted file mode 100644
index ce47aa30ad..0000000000
--- a/docs/guides/tutorials.rst
+++ /dev/null
@@ -1,24 +0,0 @@
-Tutorials
-============================
-
-This section contains a list of in-depth tutorials on how to best utilize different capabilities
-of LlamaIndex within your end-user application, including video walkthroughs of key features!
-
-They also showcase a variety of application settings that LlamaIndex can be used, from a simple
-Jupyter notebook to a chatbot to a full-stack web application.
-
-.. toctree::
-   :maxdepth: 1
-   :caption: Tutorials
-
-   tutorials/discover_llamaindex.md
-   tutorials/building_a_chatbot.md
-   tutorials/fullstack_app_guide.md
-   tutorials/fullstack_with_delphic.md
-   tutorials/sql_guide.md
-   tutorials/terms_definitions_tutorial.md
-   tutorials/unified_query.md
-   tutorials/Airbyte_demo.ipynb
-   SEC 10k Analysis <https://medium.com/@jerryjliu98/how-unstructured-and-llamaindex-can-help-bring-the-power-of-llms-to-your-own-data-3657d063e30d>
-   Using LlamaIndex with Local Models <https://colab.research.google.com/drive/16QMQePkONNlDpgiltOi7oRQgmB8dU5fl?usp=sharing>
-
diff --git a/docs/how_to/analysis.rst b/docs/how_to/analysis.rst
deleted file mode 100644
index 866579cb05..0000000000
--- a/docs/how_to/analysis.rst
+++ /dev/null
@@ -1,16 +0,0 @@
-🧠 Analysis
-==============================
-
-LlamaIndex provides a variety of tools for analysis
-of your indices and queries. Some of our tools involve the analysis token usage and cost.
-
-We also offer a Playground module, giving you a visual means of analyzing
-the token usage of various index structures + performance.
-
-
-.. toctree::
-   :maxdepth: 1
-   :caption: Analysis
-
-   analysis/cost_analysis.md
-   analysis/playground.md
\ No newline at end of file
diff --git a/docs/how_to/callbacks.rst b/docs/how_to/callbacks.rst
deleted file mode 100644
index a8a1558ed3..0000000000
--- a/docs/how_to/callbacks.rst
+++ /dev/null
@@ -1,44 +0,0 @@
-📞 Callbacks
-==============================
-
-LlamaIndex provides callbacks to help debug, track, and trace the inner workings of the library. 
-Using the callback manager, as many callbacks as needed can be added.
-
-In addition to logging data related to events, you can also track the duration and number of occurances
-of each event. 
-
-Furthermore, a trace map of events is also recorded, and callbacks can use this data
-however they want. For example, the :code:`LlamaDebugHandler` will, by default, print the trace of events
-after most operations.
-
-While each callback may not leverage each event type, the following events are available to be tracked:
-
-- CHUNKING -> Logs for the before and after of text splitting.
-- NODE_PARSING -> Logs for the documents and the nodes that they are parsed into.
-- EMBEDDING -> Logs for the number of texts embedded.
-- LLM -> Logs for the template and response of LLM calls.
-- QUERY -> Keeps track of the start and end of each query.
-- RETRIEVE -> Logs for the nodes retrieved for a query.
-- SYNTHESIZE -> Logs for the result for synthesize calls.
-- TREE -> Logs for the summary and level of summaries generated.
-- SUB_QUESTIONS -> Logs for the sub questions and answers generated.
-
-You can implement your own callback to track and trace these events, or use an existing callback.
-
-Currently supported callbacks are as follows:
-
-- TokenCountingHandler -> Flexible token counting for prompt, completion, and embedding token usage. `See the migration details here <./callbacks/token_counting_migration.html>`_ 
-- LlamaDebugHanlder -> Basic tracking and tracing for events. Example usage can be found in the notebook below.
-- WandbCallbackHandler -> Tracking of events and traces using the Wandb Prompts frontend. More details are in the notebook below or at `Wandb <https://docs.wandb.ai/guides/prompts/quickstart>`_
-- AimCallback -> Tracking of LLM inputs and outputs. Example usage can be found in the notebook below.
-
-.. toctree::
-   :maxdepth: 1
-   :caption: Callbacks
-
-   ./callbacks/token_counting_migration.md
-   ../examples/callbacks/TokenCountingHandler.ipynb
-   ../examples/callbacks/LlamaDebugHandler.ipynb
-   ../examples/callbacks/WandbCallbackHandler.ipynb
-   ../examples/callbacks/AimCallback.ipynb
-   ../../reference/callbacks.rst
\ No newline at end of file
diff --git a/docs/how_to/connector/modules.md b/docs/how_to/connector/modules.md
deleted file mode 100644
index 307d5a6e30..0000000000
--- a/docs/how_to/connector/modules.md
+++ /dev/null
@@ -1,31 +0,0 @@
-# Module Guides
-
-
-```{toctree}
----
-maxdepth: 1
----
-../../examples/data_connectors/PsychicDemo.ipynb
-../../examples/data_connectors/DeepLakeReader.ipynb
-../../examples/data_connectors/QdrantDemo.ipynb
-../../examples/data_connectors/DiscordDemo.ipynb
-../../examples/data_connectors/MongoDemo.ipynb
-../../examples/data_connectors/ChromaDemo.ipynb
-../../examples/data_connectors/MyScaleReaderDemo.ipynb
-../../examples/data_connectors/FaissDemo.ipynb
-../../examples/data_connectors/ObsidianReaderDemo.ipynb
-../../examples/data_connectors/SlackDemo.ipynb
-../../examples/data_connectors/WebPageDemo.ipynb
-../../examples/data_connectors/PineconeDemo.ipynb
-../../examples/data_connectors/MboxReaderDemo.ipynb
-../../examples/data_connectors/MilvusReaderDemo.ipynb
-../../examples/data_connectors/NotionDemo.ipynb
-../../examples/data_connectors/GithubRepositoryReaderDemo.ipynb
-../../examples/data_connectors/GoogleDocsDemo.ipynb
-../../examples/data_connectors/DatabaseReaderDemo.ipynb
-../../examples/data_connectors/TwitterDemo.ipynb
-../../examples/data_connectors/WeaviateDemo.ipynb
-../../examples/data_connectors/MakeDemo.ipynb
-../../examples/data_connectors/deplot/DeplotReader.ipynb
-```
-
diff --git a/docs/how_to/customization.rst b/docs/how_to/customization.rst
deleted file mode 100644
index ce250de0ed..0000000000
--- a/docs/how_to/customization.rst
+++ /dev/null
@@ -1,23 +0,0 @@
-🛠️ Customization
-=============
-
-LlamaIndex provides the ability to customize the following components:
-
-- LLM
-- Prompts
-- Embedding model
-- Documents
-
-These components and related classes are described in their respective guides below.
-
-.. toctree::
-   :maxdepth: 1
-   :caption: Customizable Modules
-
-   customization/custom_llms.md
-   customization/custom_prompts.md
-   customization/embeddings.md
-   customization/service_context.md
-   storage/customization.md
-   customization/streaming.md
-   customization/custom_documents.md
diff --git a/docs/how_to/customization/embeddings.md b/docs/how_to/customization/embeddings.md
deleted file mode 100644
index f90cd7cc4c..0000000000
--- a/docs/how_to/customization/embeddings.md
+++ /dev/null
@@ -1,113 +0,0 @@
-# Embedding support
-
-LlamaIndex provides support for embeddings in the following format:
-- Adding embeddings to Document objects
-- Using a Vector Store as an underlying index (e.g. `VectorStoreIndex`)
-- Querying our list and tree indices with embeddings.
-
-## Adding embeddings to Document objects
-
-You can pass in user-specified embeddings when constructing an index. This gives you control
-in specifying embeddings per Document instead of having us determine embeddings for your text (see below).
-
-Simply specify the `embedding` field when creating a Document:
-
-![](/_static/embeddings/doc_example.jpeg)
-
-## Using a Vector Store as an Underlying Index
-
-Please see the corresponding section in our [Vector Stores](/how_to/integrations/vector_stores.md)
-guide for more details.
-
-## Using an Embedding Query Mode in List/Tree Index
-
-LlamaIndex provides embedding support to our tree and list indices. In addition to each node storing text, each node can optionally store an embedding.
-During query-time, we can use embeddings to do max-similarity retrieval of nodes before calling the LLM to synthesize an answer. 
-Since similarity lookup using embeddings (e.g. using cosine similarity) does not require a LLM call, embeddings serve as a cheaper lookup mechanism instead
-of using LLMs to traverse nodes.
-
-#### How are Embeddings Generated?
-
-Since we offer embedding support during *query-time* for our list and tree indices, 
-embeddings are lazily generated and then cached (if `retriever_mode="embedding"` is specified during `query(...)`), and not during index construction.
-This design choice prevents the need to generate embeddings for all text chunks during index construction.
-
-NOTE: Our [vector-store based indices](/how_to/integrations/vector_stores.md) generate embeddings during index construction.
-
-#### Embedding Lookups
-For the list index (`ListIndex`):
-- We iterate through every node in the list, and identify the top k nodes through embedding similarity. We use these nodes to synthesize an answer.
-- See the [List Retriever API](/reference/query/retrievers/list.rst) for more details.
-- NOTE: the embedding-mode usage of the list index is roughly equivalent with the usage of our `VectorStoreIndex`; the main
-    difference is when embeddings are generated (during query-time for the list index vs. index construction for the simple vector index).
-
-For the tree index (`TreeIndex`):
-- We start with the root nodes, and traverse down the tree by picking the child node through embedding similarity.
-- See the [Tree Query API](/reference/query/retrievers/tree.rst) for more details.
-
-**Example Notebook**
-
-An example notebook is given [here](https://github.com/jerryjliu/llama_index/blob/main/examples/test_wiki/TestNYC_Embeddings.ipynb).
-
-
-
-(custom-embeddings)=
-## Custom Embeddings
-
-LlamaIndex allows you to define custom embedding modules. By default, we use `text-embedding-ada-002` from OpenAI. 
-
-You can also choose to plug in embeddings from
-Langchain's [embeddings](https://langchain.readthedocs.io/en/latest/reference/modules/embeddings.html) module.
-We introduce a wrapper class, 
-[`LangchainEmbedding`](/reference/service_context/embeddings.rst), for integration into LlamaIndex.
-
-An example snippet is shown below (to use Hugging Face embeddings) on the ListIndex:
-
-```python
-from llama_index import ListIndex, SimpleDirectoryReader
-from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-from llama_index import LangchainEmbedding, ServiceContext
-
-# load in HF embedding model from langchain
-embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
-service_context = ServiceContext.from_defaults(embed_model=embed_model)
-
-# build index
-documents = SimpleDirectoryReader('../paul_graham_essay/data').load_data()
-new_index = ListIndex.from_documents(documents)
-
-# query with embed_model specified
-query_engine = new_index.as_query_engine(
-    retriever_mode="embedding", 
-    verbose=True, 
-    service_context=service_context
-)
-response = query_engine.query("<query_text>")
-print(response)
-```
-
-Another example snippet is shown for VectorStoreIndex.
-
-```python
-from llama_index import VectorStoreIndex, SimpleDirectoryReader
-from langchain.embeddings.huggingface import HuggingFaceEmbeddings
-from llama_index import LangchainEmbedding, ServiceContext
-
-# load in HF embedding model from langchain
-embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
-service_context = ServiceContext.from_defaults(embed_model=embed_model)
-
-# load index
-documents = SimpleDirectoryReader('../paul_graham_essay/data').load_data()
-new_index = VectorStoreIndex.from_documents(
-    documents, 
-    service_context=service_context,
-)
-
-# query will use the same embed_model
-query_engine = new_index.as_query_engine(
-    verbose=True, 
-)
-response = query_engine.query("<query_text>")
-print(response)
-```
diff --git a/docs/how_to/customization/llms_migration_guide.md b/docs/how_to/customization/llms_migration_guide.md
deleted file mode 100644
index 7d1bd5312b..0000000000
--- a/docs/how_to/customization/llms_migration_guide.md
+++ /dev/null
@@ -1,55 +0,0 @@
-# Migration Guide for Using LLMs in LlamaIndex
-
-We have made some changes to the configuration of LLMs in LLamaIndex to improve its functionality and ease of use.
-
-Previously, the primary abstraction for an LLM was the `LLMPredictor`. Now, we have upgraded to a new abstraction called `LLM`, which offers a cleaner and more user-friendly interface.
-
-These changes will only affect you if you were using the `ChatGPTLLMPredictor`, `HuggingFaceLLMPredictor`, or a custom implementation subclassing `LLMPredictor`.
-
-## If you were using `ChatGPTLLMPredictor`:
-We have removed the `ChatGPTLLMPredictor`, but you can still achieve the same functionality using our new `OpenAI` class.
-
-## If you were using `HuggingFaceLLMPredictor`:
-We have updated the HuggingFace support to utilize the latest `LLM` abstraction through `HuggingFaceLLM`. To use it, initialize the `HuggingFaceLLM` in the same way as before. Instead of passing it as the `llm_predictor` argument to the service context, you now need to pass it as the `llm` argument.
-
-Old:
-```python
-hf_predictor = HuggingFaceLLMPredictor(...)
-service_context = ServiceContext.from_defaults(llm_predictor=hf_predictor)
-```
-
-New:
-```python
-llm = HuggingFaceLLM(...)
-service_context = ServiceContext.from_defaults(llm=llm)
-```
-
-## If you were subclassing `LLMPredictor`:
-We have refactored the `LLMPredictor` class and removed some outdated logic, which may impact your custom class. The recommended approach now is to implement the `llama_index.llms.base.LLM` interface when defining a custom LLM. Alternatively, you can subclass the `llama_index.llms.custom.CustomLLM` base class for a simpler implementation.
-
-Here's an example:
-
-```python
-from llama_index.llms.base import CompletionResponse, LLMMetadata, StreamCompletionResponse
-from llama_index.llms.custom import CustomLLM
-
-class YourLLM(CustomLLM):
-    def __init__(self, ...): 
-        # initialization logic
-        pass
-
-    @property
-    def metadata(self) -> LLMMetadata:
-        # metadata
-        pass
-
-    def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
-        # completion endpoint
-        pass
-
-    def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
-        # streaming completion endpoint
-        pass
-```
-
-For further reference, you can look at `llama_index/llms/huggingface.py`.
\ No newline at end of file
diff --git a/docs/how_to/customization/service_context.md b/docs/how_to/customization/service_context.md
deleted file mode 100644
index e79c192267..0000000000
--- a/docs/how_to/customization/service_context.md
+++ /dev/null
@@ -1,78 +0,0 @@
-# ServiceContext
-
-The ServiceContext object encapsulates the resources used to create indexes and run queries.
-
-The following optional items can be set in the service context:
-
-- llm_predictor: The LLM used to generate natural language responses to queries.
-- embed_model: The embedding model used to generate vector representations of text.
-- prompt_helper: The PromptHelper object that helps with truncating and repacking text chunks to fit in the LLM's context window.
-- node_parser: The parser that converts documents into nodes.
-- callback_managaer: The callback manager object that calls it's handlers on events. Provides basic logging and tracing capabilities.
-
-We also expose some common kwargs (of the above components) via the `ServiceContext.from_defaults` method
-for convenience (so you don't have to manually construct them).
- 
-Kwargs node parser:
-- chunk_size: The size of the text chunk for a node . Is used for the node parser when they aren't provided.
-- chunk overlap: The amount of overlap between nodes.
-
-Kwargs for prompt helper:
-- context_window: The size of the context window of the LLM. Typically we set this 
-  automatically with the model metadata. But we also allow explicit override via this parameter
-  for additional control (or in case the default is not available for certain latest
-  models)
-- num_output: The number of maximum output from the LLM. Typically we set this
-  automatically given the model metadata. This parameter does not actually limit the model
-  output, it affects the amount of "space" we save for the output, when computing 
-  available context window size for packing text from retrieved Nodes.
-
-Here's a complete example that sets up all objects using their default settings:
-
-```python
-from langchain.llms import OpenAI
-from llama_index import ServiceContext, LLMPredictor, OpenAIEmbedding, PromptHelper
-from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
-from llama_index.node_parser import SimpleNodeParser
-
-llm_predictor = LLMPredictor(llm=OpenAI(model_name='text-davinci-003', temperature=0, max_tokens=256))
-embed_model = OpenAIEmbedding()
-node_parser = SimpleNodeParser(text_splitter=TokenTextSplitter(chunk_size=1024, chunk_overlap=20))
-prompt_helper = PromptHelper(context_window=4096, num_output=256, chunk_overlap_ratio=0.1, chunk_size_limit=None)
-service_context = ServiceContext.from_defaults(
-  llm_predictor=llm_predictor,
-  embed_model=embed_model,
-  node_parser=node_parser,
-  prompt_helper=prompt_helper
-)
-```
-
-## Global ServiceContext
-
-You can specify a different set of defaults for the ServiceContext by setting up a global service context.
-
-With a global service context, any attributes not provided when calling `ServiceContext.from_defaults()` will be pulled from your global service context. If you never define a service context anywhere else, then the global service context will always be used.
-
-Here's a quick example of what a global service context might look like. This service context changes the LLM to `gpt-3.5-turbo`, changes the `chunk_size`, and sets up a `callback_manager` to trace events using the `LlamaDebugHandler`.
-
-First, define the service context:
-
-```python
-from langchain.chat_models import ChatOpenAI
-from llama_index import ServiceContext, LLMPredictor
-from llama_index.callbacks import CallbackManager, LlamaDebugHandler
-
-llama_debug = LlamaDebugHandler(print_trace_on_end=True)
-callback_manager = CallbackManager([llama_debug])
-
-llm = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0)
-llm_predictor = LLMPredictor(llm=llm)
-service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size=512, callback_manager=callback_manager)
-```
-
-Then, set the global service context object
-
-```python
-from llama_index import set_global_service_context
-set_global_service_context(service_context)
-```
diff --git a/docs/how_to/integrations.rst b/docs/how_to/integrations.rst
deleted file mode 100644
index 47bddfc321..0000000000
--- a/docs/how_to/integrations.rst
+++ /dev/null
@@ -1,17 +0,0 @@
-⛓️ Integrations
-============
-
-LlamaIndex provides a diverse range of integrations with other toolsets and storage providers. 
-
-Some of these integrations are provided in more detailed guides below.
-
-.. toctree::
-   :maxdepth: 1
-   :caption: Integrations
-
-   integrations/vector_stores.md
-   integrations/chatgpt_plugins.md
-   integrations/using_with_langchain.md
-   integrations/graphsignal.md
-   integrations/guidance.md
-   integrations/trulens.md
diff --git a/docs/how_to/query_engine/advanced/response_synthesis.md b/docs/how_to/query_engine/advanced/response_synthesis.md
deleted file mode 100644
index f0bae8dacd..0000000000
--- a/docs/how_to/query_engine/advanced/response_synthesis.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# Response Synthesis
-
-LlamaIndex offers different methods of synthesizing a response from relevant context. 
-The way to toggle this can be found in our [Usage Pattern Guide](setting-response-mode). 
-Below, we visually highlight how each response mode works.
-
-
-### Refine
-
-Refine is an iterative way of generating a response. We first use the context in the first node, along
-with the query, to generate an initial answer. We then pass this answer, the query, and the context of the second node
-as input into a "refine prompt" to generate a refined answer. We refine through N-1 nodes, where N is the total 
-number of nodes.
-
-![](/_static/indices/create_and_refine.png)
-
-### [Default] Compact and Refine
-Compact and refine mode first combine text chunks into larger consolidated chunks 
-that more fully utilize the available context window, then refine answers across them.
-
-This mode is faster than refine since we make fewer calls to the LLM.
-
-
-### Tree Summarize
-
-Tree summarize is another way of generating a response. We essentially build a tree index
-over the set of candidate nodes, with a *summary prompt* seeded with the query. The tree
-is built in a bottoms-up fashion, and in the end the root node is returned as the response.
-
-![](/_static/indices/tree_summarize.png)
-
-
-### Simple Summarize
-Simply combine all text chunks into one, and make a single call to the LLM.
-
-### Generation
-Ignore all text chunks, make a single call to the LLM with just the query and no additional context.  
-
-See [references](/reference/query/response_synthesizer.rst) for full details
\ No newline at end of file
diff --git a/docs/how_to/query_engine/advanced/root.md b/docs/how_to/query_engine/advanced/root.md
deleted file mode 100644
index 3a965f4b04..0000000000
--- a/docs/how_to/query_engine/advanced/root.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Advanced Concepts
-
-
-```{toctree}
----
-maxdepth: 1
----
-query_transformations.md
-second_stage.md
-response_synthesis.md
-```
\ No newline at end of file
diff --git a/docs/how_to/query_engine/advanced/second_stage.md b/docs/how_to/query_engine/advanced/second_stage.md
deleted file mode 100644
index 6e9b9100d0..0000000000
--- a/docs/how_to/query_engine/advanced/second_stage.md
+++ /dev/null
@@ -1,216 +0,0 @@
-# Second-Stage Processing
-
-By default, when a query is executed on an index or a composed graph, 
-LlamaIndex performs the following steps:
-1. **Retrieval step**: Retrieve a set of nodes from the index given the query. 
-2. **Synthesis step**: Synthesize a response over the set of nodes.
-
-Beyond standard retrieval and synthesis, LlamaIndex also provides a collection of modules
-for advanced **second-stage processing** (i.e. after retrieval and before synthesis).
-
-After retrieving the initial candidate nodes, these modules further improve
-the quality and diversity of the nodes used for synthesis by e.g. filtering, re-ranking, or augmenting.
-Examples include keyword filters, LLM-based re-ranking, and temporal-reasoning based augmentation.
-
-
-We first provide the high-level API interface, and provide some example modules, and finally discuss usage.
-
-We are also very open to contributions! Take a look at our [contribution guide](https://github.com/jerryjliu/llama_index/blob/main/CONTRIBUTING.md) if you 
-are interested in contributing a Postprocessor.
-
-## API Interface
-
-The base class is `BaseNodePostprocessor`, and the API interface is very simple: 
-
-```python
-
-class BaseNodePostprocessor:
-    """Node postprocessor."""
-
-    @abstractmethod
-    def postprocess_nodes(
-        self, nodes: List[NodeWithScore], query_bundle: Optional[QueryBundle]
-    ) -> List[NodeWithScore]:
-        """Postprocess nodes."""
-```
-
-It takes in a list of Node objects, and outputs another list of Node objects.
-
-The full API reference can be found [here](/reference/node_postprocessor.rst).
-
-
-## Example Usage
-
-The postprocessor can be used as part of a `QueryEngine`, or on its own.
-
-#### Index querying
-
-```python
-
-from llama_index.indices.postprocessor import (
-    FixedRecencyPostprocessor,
-)
-node_postprocessor = FixedRecencyPostprocessor(service_context=service_context)
-
-query_engine = index.as_query_engine(
-    similarity_top_k=3,
-    node_postprocessors=[node_postprocessor]
-)
-response = query_engine.query(
-    "How much did the author raise in seed funding from Idelle's husband (Julian) for Viaweb?", 
-)
-
-```
-
-
-#### Using as Independent Module (Lower-Level Usage)
-
-The module can also be used on its own as part of a broader flow. For instance,
-here's an example where you choose to manually postprocess an initial set of source nodes.
-
-```python
-from llama_index.indices.postprocessor import (
-    FixedRecencyPostprocessor,
-)
-
-# get initial response from vector index
-query_engine = index.as_query_engine(
-    similarity_top_k=3,
-    response_mode="no_text"
-)
-init_response = query_engine.query(query_str)
-resp_nodes = [n.node for n in init_response.source_nodes]
-
-# use node postprocessor to filter nodes
-node_postprocessor = FixedRecencyPostprocessor(service_context=service_context)
-new_nodes = node_postprocessor.postprocess_nodes(resp_nodes)
-
-# use list index to synthesize answers
-list_index = ListIndex(new_nodes)
-query_engine = list_index.as_query_engine(
-    node_postprocessors=[node_postprocessor]
-)
-response = query_engine.query(query_str)
-```
-
-
-## Example Modules
-
-### Default Postprocessors
-
-These postprocessors are simple modules that are already included by default.
-
-#### KeywordNodePostprocessor
-
-A simple postprocessor module where you are able to specify `required_keywords` or `exclude_keywords`.
-This will filter out nodes that don't have required keywords, or contain excluded keywords.
-
-#### SimilarityPostprocessor
-
-The SimilarityPostprocessor module allows you to filter nodes based on their similarity to a reference node using cosine similarity. By setting a `similarity_cutoff`, you can define a minimum similarity score that nodes must exceed to be considered relevant. This helps retrieve nodes that are closely related to the reference node in terms of their semantic meaning. Leveraging cosine similarity as a default, this postprocessor enhances the retrieval process by fine-tuning the relevance of the retrieved nodes based on their similarity to the reference node.
-
-For example, if you have a reference node and you want to retrieve nodes that are highly similar to it, you can set a high similarity cutoff. This will exclude nodes with low similarity scores and only retain nodes that have a similarity score above the specified cutoff.
-
-#### Previous/Next Postprocessors
-
-These postprocessors are able to exploit temporal relationships between nodes
-(e.g. prev/next relationships) in order to retrieve additional
-context, in the event that the existing context may not directly answer
-the question. They augment the set of retrieved nodes with context
-either in the future or the past (or both).
-
-The most basic version is `PrevNextNodePostprocessor`, which takes a fixed
-`num_nodes` as well as `mode` specifying "previous", "next", or "both".
-
-We also have `AutoPrevNextNodePostprocessor`, which is able to infer
-the `previous`, `next` direction.
-
-![](/_static/node_postprocessors/prev_next.png)
-
-
-#### Recency Postprocessors
-
-These postprocessors are able to ensure that only the most recent
-data is used as context, and that out of date context information is filtered out.
-
-Imagine that you have three versions of a document, with slight changes between versions. For instance, this document may be describing patient history. If you ask a question over this data, you would want to make sure that you're referencing the latest document, and that out of date information is not passed in.
-
-We support recency filtering through the following modules.
-
-**`FixedRecencyPostProcessor`**: sorts retrieved nodes by date in reverse order, and takes a fixed top-k set of nodes.
-
-![](/_static/node_postprocessors/recency.png)
-
-**`EmbeddingRecencyPostprocessor`**: sorts retrieved nodes by date in reverse order, and then
-looks at subsequent nodes and filters out nodes that have high embedding 
-similarity with the current node. This allows us to maintain recent Nodes
-that have "distinct" context, but filter out overlapping Nodes that
-are outdated and overlap with more recent context.
-
-
-**`TimeWeightedPostprocessor`**: adds time-weighting to retrieved nodes, using the formula `(1-time_decay) ** hours_passed`.
-The recency score is added to any score that the node already contains.
-
-
-# Token Optimizer PostProcessor
-
-Our **`SentenceEmbeddingOptimizer`** is a postprocessor that will optimize for token usage. It does this by removing words and sentences that are not relevant to the query.
-
-Here is a sample code snippet on comparing the outputs without optimization and with.
-
-```python
-from llama_index import VectorStoreIndex
-from llama_index.indices.postprocessor import SentenceEmbeddingOptimizer
-print("Without optimization")
-start_time = time.time()
-query_engine = index.as_query_engine()
-res = query_engine.query("What is the population of Berlin?")
-end_time = time.time()
-print("Total time elapsed: {}".format(end_time - start_time))
-print("Answer: {}".format(res))
-
-print("With optimization")
-start_time = time.time()
-query_engine = index.as_query_engine(
-    node_postprocessors=[SentenceEmbeddingOptimizer(percentile_cutoff=0.5)]
-)
-res = query_engine.query("What is the population of Berlin?")
-end_time = time.time()
-print("Total time elapsed: {}".format(end_time - start_time))
-print("Answer: {}".format(res))
-
-```
-
-Output:
-```text
-Without optimization
-INFO:root:> [query] Total LLM token usage: 3545 tokens
-INFO:root:> [query] Total embedding token usage: 7 tokens
-Total time elapsed: 2.8928110599517822
-Answer: 
-The population of Berlin in 1949 was approximately 2.2 million inhabitants. After the fall of the Berlin Wall in 1989, the population of Berlin increased to approximately 3.7 million inhabitants.
-
-With optimization
-INFO:root:> [optimize] Total embedding token usage: 7 tokens
-INFO:root:> [query] Total LLM token usage: 1779 tokens
-INFO:root:> [query] Total embedding token usage: 7 tokens
-Total time elapsed: 2.346346139907837
-Answer: 
-The population of Berlin is around 4.5 million.
-```
-
-Full [example notebook here](https://github.com/jerryjliu/llama_index/blob/main/docs/examples/node_postprocessor/OptimizerDemo.ipynb).
-
-```{toctree}
----
-caption: Examples
-maxdepth: 1
----
-/examples/node_postprocessor/PrevNextPostprocessorDemo.ipynb
-/examples/node_postprocessor/RecencyPostprocessorDemo.ipynb
-/examples/node_postprocessor/TimeWeightedPostprocessorDemo.ipynb
-/examples/node_postprocessor/PII.ipynb
-/examples/node_postprocessor/CohereRerank.ipynb
-/examples/node_postprocessor/LLMReranker-Gatsby.ipynb
-/examples/node_postprocessor/OptimizerDemo.ipynb
-```
\ No newline at end of file
diff --git a/docs/how_to/retriever/root.md b/docs/how_to/retriever/root.md
deleted file mode 100644
index 888892357d..0000000000
--- a/docs/how_to/retriever/root.md
+++ /dev/null
@@ -1,30 +0,0 @@
-
-# 🔍 Retriever
-## Concept
-Retrievers are responsible for fetching the most relevant context given a user query (or chat message).  
-
-It can be built on top of [Indices](/how_to/index/root.md), but can also be defined independently.
-It is used as a key building block in [Query Engines](/how_to/query_engine/root.md) (and [Chat Engines](/how_to/chat_engine/root.md)) for retrieving relevant context.
-
-## Usage Pattern
-Get started with:
-```python
-retriever = index.as_retriever()
-nodes = retriever.retrieve("Who is Paul Graham?")
-```
-
-```{toctree}
----
-maxdepth: 2
----
-usage_pattern.md
-```
-
-
-## Modules
-```{toctree}
----
-maxdepth: 2
----
-modules.md
-```
\ No newline at end of file
diff --git a/docs/how_to/storage.rst b/docs/how_to/storage.rst
deleted file mode 100644
index 67c95a08e5..0000000000
--- a/docs/how_to/storage.rst
+++ /dev/null
@@ -1,39 +0,0 @@
-💾 Storage
-============
-
-LlamaIndex provides a high-level interface for ingesting, indexing, and querying your external data.
-By default, LlamaIndex hides away the complexities and let you query your data in `under 5 lines of code </how_to/storage/customization.html>`_.
-
-
-Under the hood, LlamaIndex also supports swappable **storage components** that allows you to customize:
-
-- **Document stores**: where ingested documents (i.e., `Node` objects) are stored,
-- **Index stores**: where index metadata are stored,
-- **Vector stores**: where embedding vectors are stored.
-
-The Document/Index stores rely on a common Key-Value store abstraction, which is also detailed below.
-
-LlamaIndex supports persisting data to any storage backend supported by `fsspec <https://filesystem-spec.readthedocs.io/en/latest/index.html>`_. 
-We have confirmed support for the following storage backends:
-
-- Local filesystem
-- AWS S3
-- Cloudflare R2
-
-For an example of how to use LlamaIndex with Cloudflare R2, see `this example </examples/vector_stores/SimpleIndexOnS3.html>`_.
-
-
-.. image:: ../_static/storage/storage.png
-   :class: only-light
-
-
-.. toctree::
-   :maxdepth: 1
-   :caption: Storage
-
-   storage/save_load.md
-   storage/customization.md
-   storage/docstores.md
-   storage/index_stores.md
-   storage/vector_stores.md
-   storage/kv_stores.md
\ No newline at end of file
diff --git a/docs/how_to/storage/vector_stores.md b/docs/how_to/storage/vector_stores.md
deleted file mode 100644
index 160dbc5cd2..0000000000
--- a/docs/how_to/storage/vector_stores.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# Vector Stores
-
-Vector stores contain embedding vectors of ingested document chunks 
-(and sometimes the document chunks as well).
-
-## Simple Vector Store
-By default, LlamaIndex uses a simple in-memory vector store that's great for quick experimentation.
-They can be persisted to (and loaded from) disk by calling `vector_store.persist()` (and `SimpleVectorStore.from_persist_path(...)` respectively).
-
-## Third-Party Vector Store Integrations
-We also integrate with a wide range of vector store implementations. 
-They mainly differ in 2 aspects:
-1. in-memory vs. hosted
-2. stores only vector embeddings vs. also stores documents
-
-### In-Memory Vector Stores
-* Faiss
-* Chroma
-
-### (Self) Hosted Vector Stores
-* Pinecone
-* Weaviate
-* Milvus/Zilliz
-* Qdrant
-* Chroma
-* Opensearch
-* DeepLake
-* MyScale
-* Tair
-* DocArray
-* MongoDB Atlas
-
-### Others
-* ChatGPTRetrievalPlugin
-
-For more details, see [Vector Store Integrations](/how_to/integrations/vector_stores.md).
-
-```{toctree}
----
-caption: Examples
-maxdepth: 1
----
-../../examples/vector_stores/SimpleIndexDemo.ipynb
-../../examples/vector_stores/QdrantIndexDemo.ipynb
-../../examples/vector_stores/FaissIndexDemo.ipynb
-../../examples/vector_stores/DeepLakeIndexDemo.ipynb
-../../examples/vector_stores/MyScaleIndexDemo.ipynb
-../../examples/vector_stores/MetalIndexDemo.ipynb
-../../examples/vector_stores/WeaviateIndexDemo.ipynb
-../../examples/vector_stores/OpensearchDemo.ipynb
-../../examples/vector_stores/PineconeIndexDemo.ipynb
-../../examples/vector_stores/ChromaIndexDemo.ipynb
-../../examples/vector_stores/LanceDBIndexDemo.ipynb
-../../examples/vector_stores/MilvusIndexDemo.ipynb
-../../examples/vector_stores/RedisIndexDemo.ipynb
-../../examples/vector_stores/WeaviateIndexDemo-Hybrid.ipynb
-../../examples/vector_stores/PineconeIndexDemo-Hybrid.ipynb
-../../examples/vector_stores/AsyncIndexCreationDemo.ipynb
-../../examples/vector_stores/TairIndexDemo.ipynb
-../../examples/vector_stores/SupabaseVectorIndexDemo.ipynb
-../../examples/vector_stores/DocArrayHnswIndexDemo.ipynb
-../../examples/vector_stores/DocArrayInMemoryIndexDemo.ipynb
-../../examples/vector_stores/MongoDBAtlasVectorSearch.ipynb
-```
-
diff --git a/docs/index.rst b/docs/index.rst
index 4da09c9470..0d8fd2681a 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -53,83 +53,81 @@ to fit their needs.
 
    getting_started/installation.md
    getting_started/starter_example.md
+   getting_started/concepts.md
+   getting_started/customization.rst
 
 .. toctree::
    :maxdepth: 2
-   :caption: Development
+   :caption: End-to-End Tutorials
    :hidden:
 
-   development/contributing.rst
-   development/documentation.rst
-   development/privacy.md
-   development/changelog.rst
-
+   end_to_end_tutorials/usage_pattern.md
+   end_to_end_tutorials/discover_llamaindex.md
+   end_to_end_tutorials/use_cases.md
+
 .. toctree::
-   :maxdepth: 2
-   :caption: Guides
+   :maxdepth: 1
+   :caption: Index/Data Modules
    :hidden:
 
-   guides/primer.rst
-   guides/tutorials.rst
-
+   core_modules/data_modules/connector/root.md
+   core_modules/data_modules/documents_and_nodes/root.md
+   core_modules/data_modules/node_parsers/root.md
+   core_modules/data_modules/storage/root.md
+   core_modules/data_modules/index/root.md
 
 .. toctree::
-   :maxdepth: 2
-   :caption: Use Cases
+   :maxdepth: 1
+   :caption: Query Modules
    :hidden:
 
-   use_cases/queries.md
-   use_cases/agents.md
-   use_cases/apps.md
-
+   core_modules/query_modules/retriever/root.md
+   core_modules/query_modules/node_postprocessors/root.md
+   core_modules/query_modules/response_synthesizers/root.md
+   core_modules/query_modules/structured_outputs/root.md
+   core_modules/query_modules/query_engine/root.md
+   core_modules/query_modules/chat_engines/root.md
 
 .. toctree::
    :maxdepth: 1
-   :caption: Key Components
+   :caption: Model Modules
    :hidden:
 
-   how_to/connector/root.md
-   how_to/index/root.md
-   how_to/retriever/root.md
-   how_to/query_engine/root.md
-   how_to/chat_engine/root.md
-   how_to/customization.rst
-   how_to/analysis.rst
-   how_to/structured_outputs/root.md
-   how_to/evaluation/evaluation.md
-   how_to/integrations.rst
-   how_to/callbacks.rst
-   how_to/storage.rst
-
+   core_modules/model_modules/llms/root.md
+   core_modules/model_modules/embeddings/root.md
+   core_modules/model_modules/prompts.md
 
 .. toctree::
    :maxdepth: 1
-   :caption: Reference
+   :caption: Supporting Modules
+
+   core_modules/supporting_modules/service_context.md
+   core_modules/supporting_modules/callbacks/root.md
+   core_modules/supporting_modules/evaluation/root.md
+   core_modules/supporting_modules/cost_analysis/root.md
+   core_modules/supporting_modules/playground/root.md
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Development
    :hidden:
 
-   reference/indices.rst
-   reference/query.rst
-   reference/node.rst
-   reference/llm_predictor.rst
-   reference/llms.rst
-   reference/node_postprocessor.rst
-   reference/storage.rst
-   reference/composability.rst
-   reference/readers.rst
-   reference/prompts.rst
-   reference/service_context.rst
-   reference/callbacks.rst
-   reference/struct_store.rst
-   reference/response.rst
-   reference/playground.rst
-   reference/node_parser.rst
-   reference/example_notebooks.rst
-   reference/langchain_integrations/base.rst
+   development/contributing.rst
+   development/documentation.rst
+   development/privacy.md
+   development/changelog.rst
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Community
+   :hidden:
 
+   community/integrations.md
+   community/app_showcase.md
 
 .. toctree::
    :maxdepth: 1
-   :caption: Gallery
+   :caption: API Reference
    :hidden:
 
-   gallery/app_showcase.md
+   api_reference/index.rst
diff --git a/docs/reference/query/response_synthesizer.rst b/docs/reference/query/response_synthesizer.rst
deleted file mode 100644
index 502e8caac6..0000000000
--- a/docs/reference/query/response_synthesizer.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-.. _Ref-Response-Synthesizer:
-
-Response Synthesizer
-=====================
-
-.. automodule:: llama_index.indices.query.response_synthesis
-   :members:
-   :inherited-members:
-
-.. automodule:: llama_index.indices.response.type
-   :members:
-   :inherited-members:
\ No newline at end of file
diff --git a/docs/reference/service_context/llama_logger.rst b/docs/reference/service_context/llama_logger.rst
deleted file mode 100644
index d94ae6f263..0000000000
--- a/docs/reference/service_context/llama_logger.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-.. _Ref-Llama-Logger:
-
-Llama Logger 🪵
-=================
-
-.. automodule:: llama_index.logger
-   :members:
-   :inherited-members:
-
diff --git a/docs/reference/service_context/llm_predictor.rst b/docs/reference/service_context/llm_predictor.rst
deleted file mode 100644
index 2b7feb8d68..0000000000
--- a/docs/reference/service_context/llm_predictor.rst
+++ /dev/null
@@ -1,17 +0,0 @@
-.. _Ref-LLM-Predictor:
-
-LLMPredictor
-=================
-
-Our LLMPredictor is a wrapper around Langchain's `LLMChain` that allows easy integration into LlamaIndex.
-
-.. automodule:: llama_index.langchain_helpers.chain_wrapper
-   :members:
-   :inherited-members:
-
-
-Our MockLLMPredictor is used for token prediction. See `Cost Analysis How-To <../../how_to/analysis/cost_analysis.html>`_ for more information.
-
-.. automodule:: llama_index.token_counter.mock_chain_wrapper
-   :members:
-   :inherited-members:
diff --git a/docs/use_cases/apps.md b/docs/use_cases/apps.md
deleted file mode 100644
index ea9e88f3a6..0000000000
--- a/docs/use_cases/apps.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Integrations into LLM Applications
-
-LlamaIndex modules provide plug and play data loaders, data structures, and query interfaces. They can be used in your downstream LLM Application. Some of these applications are described below.
-
-### Chatbots
-
-Chatbots are an incredibly popular use case for LLM's. LlamaIndex gives you the tools to build Knowledge-augmented chatbots and agents.
-
-Relevant Resources:
-- [Building a Chatbot](/guides/tutorials/building_a_chatbot.md)
-- [Using with a LangChain Agent](/how_to/integrations/using_with_langchain.md)
-
-### Full-Stack Web Application
-
-LlamaIndex can be integrated into a downstream full-stack web application. It can be used in a backend server (such as Flask), packaged into a Docker container, and/or directly used in a framework such as Streamlit.
-
-We provide tutorials and resources to help you get started in this area.
-
-Relevant Resources:
-- [Fullstack Application Guide](/guides/tutorials/fullstack_app_guide.md)
-- [LlamaIndex Starter Pack](https://github.com/logan-markewich/llama_index_starter_pack)
-
diff --git a/llama_index/indices/tree/all_leaf_retriever.py b/llama_index/indices/tree/all_leaf_retriever.py
index 16b13cb66e..85083b0d90 100644
--- a/llama_index/indices/tree/all_leaf_retriever.py
+++ b/llama_index/indices/tree/all_leaf_retriever.py
@@ -1,7 +1,7 @@
 """Summarize query."""
 
 import logging
-from typing import List, cast
+from typing import Any, List, cast
 
 from llama_index.data_structs.data_structs import IndexGraph
 from llama_index.indices.base_retriever import BaseRetriever
@@ -28,7 +28,7 @@ class TreeAllLeafRetriever(BaseRetriever):
 
     """
 
-    def __init__(self, index: TreeIndex):
+    def __init__(self, index: TreeIndex, **kwargs: Any) -> None:
         self._index = index
         self._index_struct = index.index_struct
         self._docstore = index.docstore
diff --git a/llama_index/indices/tree/tree_root_retriever.py b/llama_index/indices/tree/tree_root_retriever.py
index 959429ca40..0456c5b558 100644
--- a/llama_index/indices/tree/tree_root_retriever.py
+++ b/llama_index/indices/tree/tree_root_retriever.py
@@ -1,6 +1,6 @@
 """Retrieve query."""
 import logging
-from typing import List
+from typing import Any, List
 
 from llama_index.indices.base_retriever import BaseRetriever
 from llama_index.indices.query.schema import QueryBundle
@@ -21,7 +21,7 @@ class TreeRootRetriever(BaseRetriever):
     attempt to parse information down the graph in order to synthesize an answer.
     """
 
-    def __init__(self, index: TreeIndex):
+    def __init__(self, index: TreeIndex, **kwargs: Any) -> None:
         self._index = index
         self._index_struct = index.index_struct
         self._docstore = index.docstore
diff --git a/llama_index/llms/__init__.py b/llama_index/llms/__init__.py
index 1ce5f45b21..0662941581 100644
--- a/llama_index/llms/__init__.py
+++ b/llama_index/llms/__init__.py
@@ -13,6 +13,7 @@ from llama_index.llms.base import (
 from llama_index.llms.custom import CustomLLM
 from llama_index.llms.huggingface import HuggingFaceLLM
 from llama_index.llms.langchain import LangChainLLM
+from llama_index.llms.mock import MockLLM
 from llama_index.llms.openai import OpenAI
 from llama_index.llms.palm import PaLM
 
@@ -23,6 +24,7 @@ __all__ = [
     "HuggingFaceLLM",
     "PaLM",
     "CustomLLM",
+    "MockLLM",
     "ChatMessage",
     "MessageRole",
     "ChatResponse",
diff --git a/llama_index/llms/mock.py b/llama_index/llms/mock.py
index 65ee4b9a4c..a9feef4df8 100644
--- a/llama_index/llms/mock.py
+++ b/llama_index/llms/mock.py
@@ -1,25 +1,43 @@
-from typing import Any
+from typing import Any, Optional
 
 from llama_index.llms.base import CompletionResponse, CompletionResponseGen, LLMMetadata
 from llama_index.llms.custom import CustomLLM
 
 
 class MockLLM(CustomLLM):
+    def __init__(self, max_tokens: Optional[int] = None):
+        self.max_tokens = max_tokens
+
     @property
     def metadata(self) -> LLMMetadata:
-        return LLMMetadata()
+        return LLMMetadata(num_output=self.max_tokens)
+
+    def _generate_text(self, length: int) -> str:
+        return " ".join(["text" for _ in range(length)])
 
     def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
+        response_text = (
+            self._generate_text(self.max_tokens) if self.max_tokens else prompt
+        )
+
         return CompletionResponse(
-            text=prompt,
+            text=response_text,
         )
 
     def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
-        def gen() -> CompletionResponseGen:
+        def gen_prompt() -> CompletionResponseGen:
             for ch in prompt:
                 yield CompletionResponse(
                     text=prompt,
                     delta=ch,
                 )
 
-        return gen()
+        def gen_response(max_tokens: int) -> CompletionResponseGen:
+            for i in range(max_tokens):
+            response_text = self._generate_text(i + 1)
+                yield CompletionResponse(
+                    text=response_text,
+                    delta="text ",
+                )
+
+        return gen_response(self.max_tokens) if self.max_tokens else gen_prompt()
diff --git a/llama_index/playground/base.py b/llama_index/playground/base.py
index 601fdcac60..12f8f21e90 100644
--- a/llama_index/playground/base.py
+++ b/llama_index/playground/base.py
@@ -7,6 +7,7 @@ from typing import Any, Dict, List, Optional, Type, Union
 import pandas as pd
 from llama_index.bridge.langchain import get_color_mapping, print_text
 
+from llama_index.callbacks import CallbackManager, TokenCountingHandler
 from llama_index.indices.base import BaseIndex
 from llama_index.indices.list.base import ListIndex, ListRetrieverMode
 from llama_index.indices.tree.base import TreeIndex, TreeRetrieverMode
@@ -143,9 +144,18 @@ class Playground:
                     f"\033[1m{index_name}\033[0m, retriever mode = {retriever_mode}",
                     end="\n",
                 )
-                # TODO: refactor query mode
+
+                # insert token counter into service context
+                service_context = index.service_context
+                token_counter = TokenCountingHandler()
+                callback_manager = CallbackManager([token_counter])
+                service_context.llm_predictor.callback_manager = callback_manager
+                service_context.embed_model.callback_manager = callback_manager
+
                 try:
-                    query_engine = index.as_query_engine(retriever_mode=retriever_mode)
+                    query_engine = index.as_query_engine(
+                        retriever_mode=retriever_mode, service_context=service_context
+                    )
                 except ValueError:
                     continue
 
@@ -160,6 +170,9 @@ class Playground:
                         "Retriever Mode": retriever_mode,
                         "Output": str(output),
                         "Duration": duration,
+                        "Prompt Tokens": token_counter.prompt_llm_token_count,
+                        "Completion Tokens": token_counter.completion_llm_token_count,
+                        "Embed Tokens": token_counter.total_embedding_token_count,
                     }
                 )
         print(f"\nRan {len(result)} combinations in total.")
-- 
GitLab