From 7b04978ce827f4fcde02915ac30cb4d0ad5a0084 Mon Sep 17 00:00:00 2001 From: Brace Sproul Date: Mon, 5 Feb 2024 13:29:29 -0800 Subject: [PATCH] docs[minor]: Add new QA & RAG use case section (#4211) * added index & quickstart pages * sources page * chat history notebook * streaming * one more streaming cell * cr * almost finished * agents notebook * local models * cr * commit quickstart * gitignore .md files * docs * cr * cr * lets goooooo * chore: lint files * fix links * cr * drop unnecessary dep * cer * cr * cr --- .gitignore | 3 +- CONTRIBUTING.md | 26 + deno.json | 14 + docs/core_docs/.gitignore | 2 + .../docs/{community.md => community.mdx} | 0 .../{databerry.md => databerry.mdx} | 0 .../{helicone.md => helicone.mdx} | 0 .../docs/get_started/introduction.mdx | 2 +- .../{deployment.md => deployment.mdx} | 0 .../production/{tracing.md => tracing.mdx} | 0 .../docs/{security.md => security.mdx} | 0 .../{rag => }/code_understanding.mdx | 2 +- .../question_answering/_category_.yml | 2 +- .../advanced_conversational_qa.mdx | 281 ---- .../question_answering/chat_history.ipynb | 351 +++++ .../question_answering/citations.ipynb | 1302 +++++++++++++++++ .../conversational_retrieval_agents.ipynb | 457 ++++++ .../conversational_retrieval_agents.mdx | 207 --- .../use_cases/question_answering/index.mdx | 366 +---- .../local_retrieval_qa.ipynb | 424 ++++++ .../question_answering/local_retrieval_qa.mdx | 68 - .../question_answering/quickstart.ipynb | 849 +++++++++++ .../question_answering/sources.ipynb | 239 +++ .../question_answering/streaming.ipynb | 416 ++++++ .../docs/use_cases/rag/_category_.yml | 2 - docs/core_docs/package.json | 7 +- docs/core_docs/static/img/rag_indexing.png | Bin 0 -> 132395 bytes .../static/img/rag_retrieval_generation.png | Bin 0 -> 59778 bytes docs/core_docs/vercel_build.sh | 13 + 29 files changed, 4138 insertions(+), 895 deletions(-) create mode 100644 deno.json rename docs/core_docs/docs/{community.md => community.mdx} (100%) rename 
docs/core_docs/docs/ecosystem/integrations/{databerry.md => databerry.mdx} (100%) rename docs/core_docs/docs/ecosystem/integrations/{helicone.md => helicone.mdx} (100%) rename docs/core_docs/docs/production/{deployment.md => deployment.mdx} (100%) rename docs/core_docs/docs/production/{tracing.md => tracing.mdx} (100%) rename docs/core_docs/docs/{security.md => security.mdx} (100%) rename docs/core_docs/docs/use_cases/{rag => }/code_understanding.mdx (99%) delete mode 100644 docs/core_docs/docs/use_cases/question_answering/advanced_conversational_qa.mdx create mode 100644 docs/core_docs/docs/use_cases/question_answering/chat_history.ipynb create mode 100644 docs/core_docs/docs/use_cases/question_answering/citations.ipynb create mode 100644 docs/core_docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb delete mode 100644 docs/core_docs/docs/use_cases/question_answering/conversational_retrieval_agents.mdx create mode 100644 docs/core_docs/docs/use_cases/question_answering/local_retrieval_qa.ipynb delete mode 100644 docs/core_docs/docs/use_cases/question_answering/local_retrieval_qa.mdx create mode 100644 docs/core_docs/docs/use_cases/question_answering/quickstart.ipynb create mode 100644 docs/core_docs/docs/use_cases/question_answering/sources.ipynb create mode 100644 docs/core_docs/docs/use_cases/question_answering/streaming.ipynb delete mode 100644 docs/core_docs/docs/use_cases/rag/_category_.yml create mode 100644 docs/core_docs/static/img/rag_indexing.png create mode 100644 docs/core_docs/static/img/rag_retrieval_generation.png create mode 100644 docs/core_docs/vercel_build.sh diff --git a/.gitignore b/.gitignore index fd242a79ea57..31a080cecceb 100644 --- a/.gitignore +++ b/.gitignore @@ -42,4 +42,5 @@ langchain/api_refs_docs_build/dist/**/* .docusaurus/ docs/build/ -docs/api_refs/typedoc.json \ No newline at end of file +docs/api_refs/typedoc.json +docs/core_docs/**/*.md \ No newline at end of file diff --git a/CONTRIBUTING.md 
b/CONTRIBUTING.md index b9490f3882b3..b7de111ae5e0 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -266,6 +266,32 @@ and in generated documentation. ### Contribute Documentation +#### Install dependencies + +##### Note: you only need to follow these steps if you are building the docs site locally. + +1. [Quarto](https://quarto.org/) - package that converts Jupyter notebooks (`.ipynb` files) into `.mdx` files for serving in Docusaurus. +2. `yarn build --filter=core_docs` - It's as simple as that! (or you can simply run `yarn build` from `docs/core_docs/`) + +All notebooks are converted to `.md` files and automatically gitignored. If you would like to create a non notebook doc, it must be a `.mdx` file. + +### Writing Notebooks + +When adding new dependencies inside the notebook you must update the import map inside `deno.json` in the root of the LangChain repo. + +This is required because the notebooks use the Deno runtime, and Deno formats imports differently than Node.js. + +Example: + +```typescript +// Import in Node: +import { z } from "zod"; +// Import in Deno: +import { z } from "npm:/zod"; +``` + +See examples inside `deno.json` for more details. + Docs are largely autogenerated by [TypeDoc](https://typedoc.org/) from the code. For that reason, we ask that you add good documentation to all classes and methods. 
diff --git a/deno.json b/deno.json new file mode 100644 index 000000000000..a6fd763f93af --- /dev/null +++ b/deno.json @@ -0,0 +1,14 @@ +{ + "imports": { + "langchain/": "npm:/langchain/", + "@langchain/community/": "npm:/@langchain/community/", + "@langchain/openai": "npm:@langchain/openai", + "@langchain/core/": "npm:/@langchain/core/", + "cheerio": "npm:/cheerio", + "@langchain/pinecone": "npm:@langchain/pinecone", + "@pinecone-database/pinecone": "npm:@pinecone-database/pinecone", + "zod": "npm:/zod", + "@langchain/anthropic": "npm:@langchain/anthropic", + "node-llama-cpp": "npm:/node-llama-cpp" + } +} \ No newline at end of file diff --git a/docs/core_docs/.gitignore b/docs/core_docs/.gitignore index cd5a72c0f0cd..7a5edfacbac6 100644 --- a/docs/core_docs/.gitignore +++ b/docs/core_docs/.gitignore @@ -29,3 +29,5 @@ yarn-error.log* !.yarn/releases !.yarn/sdks !.yarn/versions + +/.quarto/ \ No newline at end of file diff --git a/docs/core_docs/docs/community.md b/docs/core_docs/docs/community.mdx similarity index 100% rename from docs/core_docs/docs/community.md rename to docs/core_docs/docs/community.mdx diff --git a/docs/core_docs/docs/ecosystem/integrations/databerry.md b/docs/core_docs/docs/ecosystem/integrations/databerry.mdx similarity index 100% rename from docs/core_docs/docs/ecosystem/integrations/databerry.md rename to docs/core_docs/docs/ecosystem/integrations/databerry.mdx diff --git a/docs/core_docs/docs/ecosystem/integrations/helicone.md b/docs/core_docs/docs/ecosystem/integrations/helicone.mdx similarity index 100% rename from docs/core_docs/docs/ecosystem/integrations/helicone.md rename to docs/core_docs/docs/ecosystem/integrations/helicone.mdx diff --git a/docs/core_docs/docs/get_started/introduction.mdx b/docs/core_docs/docs/get_started/introduction.mdx index 2fc76bc2141a..e592e0e496e8 100644 --- a/docs/core_docs/docs/get_started/introduction.mdx +++ b/docs/core_docs/docs/get_started/introduction.mdx @@ -80,7 +80,7 @@ Let models choose which 
tools to use given high-level directives Walkthroughs and techniques for common end-to-end use cases, like: - [Document question answering](/docs/use_cases/question_answering/) -- [RAG](/docs/use_cases/rag/code_understanding) +- [RAG](/docs/use_cases/question_answering/) - [Agents](/docs/use_cases/autonomous_agents/) - and much more... diff --git a/docs/core_docs/docs/production/deployment.md b/docs/core_docs/docs/production/deployment.mdx similarity index 100% rename from docs/core_docs/docs/production/deployment.md rename to docs/core_docs/docs/production/deployment.mdx diff --git a/docs/core_docs/docs/production/tracing.md b/docs/core_docs/docs/production/tracing.mdx similarity index 100% rename from docs/core_docs/docs/production/tracing.md rename to docs/core_docs/docs/production/tracing.mdx diff --git a/docs/core_docs/docs/security.md b/docs/core_docs/docs/security.mdx similarity index 100% rename from docs/core_docs/docs/security.md rename to docs/core_docs/docs/security.mdx diff --git a/docs/core_docs/docs/use_cases/rag/code_understanding.mdx b/docs/core_docs/docs/use_cases/code_understanding.mdx similarity index 99% rename from docs/core_docs/docs/use_cases/rag/code_understanding.mdx rename to docs/core_docs/docs/use_cases/code_understanding.mdx index 7ffa80c76544..6b0dff5d7360 100644 --- a/docs/core_docs/docs/use_cases/rag/code_understanding.mdx +++ b/docs/core_docs/docs/use_cases/code_understanding.mdx @@ -1,4 +1,4 @@ -# RAG over code +# Code Understanding ## Use case diff --git a/docs/core_docs/docs/use_cases/question_answering/_category_.yml b/docs/core_docs/docs/use_cases/question_answering/_category_.yml index e54d1d866a22..e18fc3a3e8a6 100644 --- a/docs/core_docs/docs/use_cases/question_answering/_category_.yml +++ b/docs/core_docs/docs/use_cases/question_answering/_category_.yml @@ -1,2 +1,2 @@ -label: 'QA and Chat over Documents' +label: 'Q&A with RAG' position: 3 \ No newline at end of file diff --git 
a/docs/core_docs/docs/use_cases/question_answering/advanced_conversational_qa.mdx b/docs/core_docs/docs/use_cases/question_answering/advanced_conversational_qa.mdx deleted file mode 100644 index 4bc901a3d6bb..000000000000 --- a/docs/core_docs/docs/use_cases/question_answering/advanced_conversational_qa.mdx +++ /dev/null @@ -1,281 +0,0 @@ -# Advanced Conversational QA - -import CodeBlock from "@theme/CodeBlock"; -import AdvancedConversationalQAExample from "@examples/use_cases/advanced/conversational_qa.ts"; - -Conversing with LLMs is a great way to demonstrate their capabilities. Adding chat history and external context can exponentially increase the complexity of the conversation. -In this example, we'll show how to use `Runnables` to construct a conversational QA system that can answer questions, remember previous chats, and utilize external context. - -The first step is to load our context (in this example we'll use the State Of The Union speech from 2022). This is also a good place to instantiate our retriever, and memory classes. 
- -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/openai @langchain/community -``` - -```typescript -import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; -import { HNSWLib } from "langchain/vectorstores/hnswlib"; -import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"; -import { BufferMemory } from "langchain/memory"; -import * as fs from "fs"; - -/* Initialize the LLM to use to answer the question */ -const model = new ChatOpenAI({}); -/* Load in the file we want to do question answering over */ -const text = fs.readFileSync("state_of_the_union.txt", "utf8"); -/* Split the text into chunks */ -const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000 }); -const docs = await textSplitter.createDocuments([text]); -/* Create the vectorstore and initialize it as a retriever */ -const vectorStore = await HNSWLib.fromDocuments(docs, new OpenAIEmbeddings()); -const retriever = vectorStore.asRetriever(); -/* Initialize our BufferMemory store */ -const memory = new BufferMemory({ - memoryKey: "chatHistory", -}); -``` - -Next, we will need some helper utils to serialize our context (converting inputs to strings). - -```typescript -import { Document } from "langchain/document"; - -/* Ensure our chat history is always passed in as a string */ -const serializeChatHistory = (chatHistory: string | Array) => { - if (Array.isArray(chatHistory)) { - return chatHistory.join("\n"); - } - return chatHistory; -}; -``` - -Our conversational system performs two main LLM queries. -The first is the question answering: given some context and chat history, answer the user's question. -The second is when the LLM is passed chat history. In that case, the LLM will respond with a better formatted question, -utilizing past history and the current question. -Let's create our prompts for both. 
- -```typescript -import { PromptTemplate } from "langchain/prompts"; - -/** - * Create a prompt template for generating an answer based on context and - * a question. - * - * Chat history will be an empty string if it's the first question. - * - * inputVariables: ["chatHistory", "context", "question"] - */ -const questionPrompt = PromptTemplate.fromTemplate( - `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. ----------------- -CHAT HISTORY: {chatHistory} ----------------- -CONTEXT: {context} ----------------- -QUESTION: {question} ----------------- -Helpful Answer:` -); - -/** - * Creates a prompt template for __generating a question__ to then ask an LLM - * based on previous chat history, context and the question. - * - * inputVariables: ["chatHistory", "question"] - */ -const questionGeneratorTemplate = - PromptTemplate.fromTemplate(`Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question. ----------------- -CHAT HISTORY: {chatHistory} ----------------- -FOLLOWUP QUESTION: {question} ----------------- -Standalone question:`); -``` - -Now we can start writing our main question answering sequence. For this, we'll put everything together using a `RunnableSequence` and one helper function -that abstracts the last processing step. - -```typescript -import { RunnableSequence } from "@langchain/core/runnables"; -import { StringOutputParser } from "@langchain/core/output_parsers"; -import { LLMChain } from "langchain/chains"; -import { formatDocumentsAsString } from "langchain/util/document"; - -/** - * A helper function which performs the LLM call and saves the context to memory. 
- */ -const handleProcessQuery = async (input: { - question: string; - context: string; - chatHistory?: string | Array; -}) => { - const chain = new LLMChain({ - llm: model, - prompt: questionPrompt, - outputParser: new StringOutputParser(), - }); - - const { text } = await chain.call({ - ...input, - chatHistory: serializeChatHistory(input.chatHistory ?? ""), - }); - - await memory.saveContext( - { - human: input.question, - }, - { - ai: text, - } - ); - - return text; -}; - -const answerQuestionChain = RunnableSequence.from([ - { - question: (input: { - question: string; - chatHistory?: string | Array; - }) => input.question, - }, - { - question: (previousStepResult: { - question: string; - chatHistory?: string | Array; - }) => previousStepResult.question, - chatHistory: (previousStepResult: { - question: string; - chatHistory?: string | Array; - }) => serializeChatHistory(previousStepResult.chatHistory ?? ""), - context: async (previousStepResult: { - question: string; - chatHistory?: string | Array; - }) => { - // Fetch relevant docs and serialize to a string. - const relevantDocs = await retriever.getRelevantDocuments( - previousStepResult.question - ); - const serialized = formatDocumentsAsString(relevantDocs); - return serialized; - }, - }, - handleProcessQuery, -]); -``` - -In the above code we're using a `RunnableSequence` which takes in one `question` input. -This input then gets piped to the next step where we perform the following operations: - -1. Pass the question through unchanged. -2. Serialize the chat history into a string, if it's been passed in. -3. Fetch relevant documents from the retriever and serialize them into a string. - -After this we can create a `RunnableSequence` for generating questions based on past history and the current question. 
- -```typescript -const generateQuestionChain = RunnableSequence.from([ - { - question: (input: { - question: string; - chatHistory: string | Array; - }) => input.question, - chatHistory: async () => { - const memoryResult = await memory.loadMemoryVariables({}); - return serializeChatHistory(memoryResult.chatHistory ?? ""); - }, - }, - questionGeneratorTemplate, - model, - // Take the result of the above model call, and pass it through to the - // next RunnableSequence chain which will answer the question - { - question: (previousStepResult: { text: string }) => previousStepResult.text, - }, - answerQuestionChain, -]); -``` - -The steps taken here are largely the same. We're taking a `question` as an input, and querying our memory store for the `chatHistory`. -Next we pipe those values into our prompt template, and then the LLM model for performing the request. -Finally, we take the result of the LLM query (unparsed) and pass it to the `answerQuestionChain` as a key-value pair where the key is `question`. - -Now that we have our two main operations defined, we can create a `RunnableBranch` which given two inputs, it performs the first `Runnable` where the check function returns true. -We also have to pass a fallback `Runnable` for cases where all checks return false (this should never occur in practice with our specific example). - -```typescript -import { RunnableBranch } from "@langchain/core/runnables"; - -const branch = RunnableBranch.from([ - [ - async () => { - const memoryResult = await memory.loadMemoryVariables({}); - const isChatHistoryPresent = !memoryResult.chatHistory.length; - - return isChatHistoryPresent; - }, - answerQuestionChain, - ], - [ - async () => { - const memoryResult = await memory.loadMemoryVariables({}); - const isChatHistoryPresent = - !!memoryResult.chatHistory && memoryResult.chatHistory.length; - - return isChatHistoryPresent; - }, - generateQuestionChain, - ], - answerQuestionChain, -]); -``` - -The checks are fairly simple. 
We're just checking if the chat history is present or not. - -Lastly we create our full chain which takes in a question, runs the `RunnableBranch` to determine which `Runnable` to use, and then returns the result! - -```typescript -/* Define our chain which calls the branch with our input. */ -const fullChain = RunnableSequence.from([ - { - question: (input: { question: string }) => input.question, - }, - branch, -]); - -/* Invoke our `Runnable` with the first question */ -const resultOne = await fullChain.invoke({ - question: "What did the president say about Justice Breyer?", -}); - -console.log({ resultOne }); -/** - * { - * resultOne: 'The president thanked Justice Breyer for his service and described him as an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court.' - * } - */ - -/* Invoke our `Runnable` again with a followup question */ -const resultTwo = await fullChain.invoke({ - question: "Was it nice?", -}); - -console.log({ resultTwo }); -/** - * { - * resultTwo: "Yes, the president's description of Justice Breyer was positive." - * } - */ -``` - -That's it! Now we can have a full contextual conversation with our LLM. - -The full code for this example can be found below. 
- -{AdvancedConversationalQAExample} diff --git a/docs/core_docs/docs/use_cases/question_answering/chat_history.ipynb b/docs/core_docs/docs/use_cases/question_answering/chat_history.ipynb new file mode 100644 index 000000000000..752fa029837b --- /dev/null +++ b/docs/core_docs/docs/use_cases/question_answering/chat_history.ipynb @@ -0,0 +1,351 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Add chat history\n", + "\n", + "In many Q&A applications we want to allow the user to have a back-and-forth conversation, meaning the application needs some sort of \"memory\" of past questions and answers, and some logic for incorporating those into its current thinking.\n", + "\n", + "In this guide we focus on **adding logic for incorporating historical messages, and NOT on chat history management.** Chat history management is [covered here](/docs/expression_language/how_to/message_history).\n", + "\n", + "We'll work off of the Q&A app we built over the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng in the [Quickstart](/docs/use_cases/question_answering/quickstart). We'll need to update two things about our existing app:\n", + "\n", + "1. **Prompt**: Update our prompt to support historical messages as an input.\n", + "2. **Contextualizing questions**: Add a sub-chain that takes the latest user question and reformulates it in the context of the chat history. This is needed in case the latest question references some context from past messages. For example, if a user asks a follow-up question like \"Can you elaborate on the second point?\", this cannot be understood without the context of the previous message. Therefore we can't effectively perform retrieval with a question like this." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup\n", + "### Dependencies\n", + "\n", + "We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/modules/model_io/chat) or [LLM](/docs/modules/model_io/llms), [Embeddings](/docs/modules/data_connection/text_embedding/), and [VectorStore](/docs/modules/data_connection/vectorstores/) or [Retriever](/docs/modules/data_connection/retrievers/).\n", + "\n", + "We’ll use the following packages:\n", + "\n", + "```bash\n", + "npm install --save langchain @langchain/openai cheerio\n", + "```\n", + "\n", + "We need to set environment variable `OPENAI_API_KEY`:\n", + "\n", + "```bash\n", + "export OPENAI_API_KEY=YOUR_KEY\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### LangSmith\n", + "\n", + "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://smith.langchain.com/).\n", + "\n", + "Note that LangSmith is not needed, but it is helpful. 
If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\n", + "\n", + "\n", + "```bash\n", + "export LANGCHAIN_TRACING_V2=true\n", + "export LANGCHAIN_API_KEY=YOUR_KEY\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Initial setup" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import \"cheerio\";\n", + "import { CheerioWebBaseLoader } from \"langchain/document_loaders/web/cheerio\";\n", + "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\"\n", + "import { OpenAIEmbeddings, ChatOpenAI } from \"@langchain/openai\";\n", + "import { pull } from \"langchain/hub\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { RunnableSequence, RunnablePassthrough } from \"@langchain/core/runnables\";\n", + "import { StringOutputParser } from \"@langchain/core/output_parsers\";" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "import { createStuffDocumentsChain } from \"langchain/chains/combine_documents\";\n", + "\n", + "const loader = new CheerioWebBaseLoader(\n", + " \"https://lilianweng.github.io/posts/2023-06-23-agent/\"\n", + ");\n", + "\n", + "const docs = await loader.load();\n", + "\n", + "const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n", + "const splits = await textSplitter.splitDocuments(docs);\n", + "const vectorStore = await MemoryVectorStore.fromDocuments(splits, new OpenAIEmbeddings());\n", + "\n", + "// Retrieve and generate using the relevant snippets of the blog.\n", + "const retriever = vectorStore.asRetriever();\n", + "const prompt = await pull(\"rlm/rag-prompt\");\n", + "const llm = new ChatOpenAI({ modelName: 
\"gpt-3.5-turbo\", temperature: 0 });\n", + "const ragChain = await createStuffDocumentsChain({\n", + " llm,\n", + " prompt,\n", + " outputParser: new StringOutputParser(),\n", + "});" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. I\"\u001b[39m... 208 more characters" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await ragChain.invoke({\n", + " context: await retriever.invoke(\"What is Task Decomposition?\"),\n", + " question: \"What is Task Decomposition?\"\n", + "});" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Contextualizing the question\n", + "\n", + "First we'll need to define a sub-chain that takes historical messages and the latest user question, and reformulates the question if it makes reference to any information in the historical information.\n", + "\n", + "We'll use a prompt that includes a `MessagesPlaceholder` variable under the name \"chat_history\". This allows us to pass in a list of Messages to the prompt using the \"chat_history\" input key, and these messages will be inserted after the system message and before the human message containing the latest question." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n", + "\n", + "const contextualizeQSystemPrompt = `Given a chat history and the latest user question\n", + "which might reference context in the chat history, formulate a standalone question\n", + "which can be understood without the chat history. 
Do NOT answer the question,\n", + "just reformulate it if needed and otherwise return it as is.`;\n", + "\n", + "const contextualizeQPrompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", contextualizeQSystemPrompt],\n", + " new MessagesPlaceholder(\"chat_history\"),\n", + " [\"human\", \"{question}\"]\n", + "]);\n", + "const contextualizeQChain = contextualizeQPrompt.pipe(llm).pipe(new StringOutputParser());" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Using this chain we can ask follow-up questions that reference past messages and have them reformulated into standalone questions:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m'What is the definition of \"large\" in the context of a language model?'\u001b[39m" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n", + "\n", + "await contextualizeQChain.invoke({\n", + " chat_history: [\n", + " new HumanMessage(\"What does LLM stand for?\"),\n", + " new AIMessage(\"Large language model\") \n", + " ],\n", + " question: \"What is meant by large\",\n", + "})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Chain with chat history\n", + "\n", + "And now we can build our full QA chain. \n", + "\n", + "Notice we add some routing functionality to only run the \"condense question chain\" when our chat history isn't empty. Here we're taking advantage of the fact that if a function in an LCEL chain returns another chain, that chain will itself be invoked." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\"\n", + "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", + "import { formatDocumentsAsString } from \"langchain/util/document\";\n", + "\n", + "const qaSystemPrompt = `You are an assistant for question-answering tasks.\n", + "Use the following pieces of retrieved context to answer the question.\n", + "If you don't know the answer, just say that you don't know.\n", + "Use three sentences maximum and keep the answer concise.\n", + "\n", + "{context}`\n", + "\n", + "const qaPrompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", qaSystemPrompt],\n", + " new MessagesPlaceholder(\"chat_history\"),\n", + " [\"human\", \"{question}\"]\n", + "]);\n", + "\n", + "const contextualizedQuestion = (input: Record) => {\n", + " if (\"chat_history\" in input) {\n", + " return contextualizeQChain;\n", + " }\n", + " return input.question;\n", + "};\n", + "\n", + "const ragChain = RunnableSequence.from([\n", + " RunnablePassthrough.assign({\n", + " context: (input: Record) => {\n", + " if (\"chat_history\" in input) {\n", + " const chain = contextualizedQuestion(input);\n", + " return chain.pipe(retriever).pipe(formatDocumentsAsString);\n", + " }\n", + " return \"\";\n", + " },\n", + " }),\n", + " qaPrompt,\n", + " llm\n", + "])" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: \"Task decomposition is a technique used to break down complex tasks into smaller and more manageable \"... 
278 more characters,\n", + " additional_kwargs: { function_call: undefined, tool_calls: undefined }\n", + " },\n", + " lc_namespace: [ \"langchain_core\", \"messages\" ],\n", + " content: \"Task decomposition is a technique used to break down complex tasks into smaller and more manageable \"... 278 more characters,\n", + " name: undefined,\n", + " additional_kwargs: { function_call: undefined, tool_calls: undefined }\n", + "}\n" + ] + }, + { + "data": { + "text/plain": [ + "AIMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m\"Common ways of task decomposition include using prompting techniques like Chain of Thought (CoT) or \"\u001b[39m... 332 more characters,\n", + " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m }\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"Common ways of task decomposition include using prompting techniques like Chain of Thought (CoT) or \"\u001b[39m... 
332 more characters,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: { function_call: \u001b[90mundefined\u001b[39m, tool_calls: \u001b[90mundefined\u001b[39m }\n", + "}" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "let chat_history = [];\n", + "\n", + "const question = \"What is task decomposition?\";\n", + "const aiMsg = await ragChain.invoke({ question, chat_history });\n", + "console.log(aiMsg)\n", + "chat_history = chat_history.concat(aiMsg);\n", + "\n", + "const secondQuestion = \"What are common ways of doing it?\";\n", + "await ragChain.invoke({ question: secondQuestion, chat_history });" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "See the first [LangSmith trace here](https://smith.langchain.com/public/527981c6-5018-4b68-a11a-ebcde77843e7/r) and the [second trace here](https://smith.langchain.com/public/7b97994a-ab9f-4bf3-a2e4-abb609e5610a/r)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Here we've gone over how to add application logic for incorporating historical outputs, but we're still manually updating the chat history and inserting it into each input. In a real Q&A application we'll want some way of persisting chat history and some way of automatically inserting and updating it.\n", + "\n", + "For this we can use:\n", + "\n", + "- [BaseChatMessageHistory](/docs/modules/memory/chat_messages/): Store chat history.\n", + "- [RunnableWithMessageHistory](/docs/expression_language/how_to/message_history): Wrapper for an LCEL chain and a `BaseChatMessageHistory` that handles injecting chat history into inputs and updating it after each invocation.\n", + "\n", + "For a detailed walkthrough of how to use these classes together to create a stateful conversational chain, head to the [How to add message history (memory)](/docs/expression_language/how_to/message_history) LCEL page." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/core_docs/docs/use_cases/question_answering/citations.ipynb b/docs/core_docs/docs/use_cases/question_answering/citations.ipynb new file mode 100644 index 000000000000..d05db8127f11 --- /dev/null +++ b/docs/core_docs/docs/use_cases/question_answering/citations.ipynb @@ -0,0 +1,1302 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Citations\n", + "\n", + "How can we get a model to cite which parts of the source documents it referenced in its response?\n", + "\n", + "To explore some techniques for extracting citations, let's first create a simple RAG chain. To start we'll just retrieve from the web using the [TavilySearchAPIRetriever](https://js.langchain.com/docs/integrations/retrievers/tavily)." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup\n", + "### Dependencies\n", + "\n", + "We’ll use an OpenAI chat model and a [TavilySearchAPIRetriever](https://js.langchain.com/docs/integrations/retrievers/tavily) in this walkthrough, but everything shown here works with any [ChatModel](/docs/modules/model_io/chat) or [LLM](/docs/modules/model_io/llms) and any [Retriever](/docs/modules/data_connection/retrievers/).\n", + "\n", + "We’ll use the following packages:\n", + "\n", + "```bash\n", + "npm install --save langchain @langchain/community @langchain/openai\n", + "```\n", + "\n", + "We need to set environment variables for Tavily Search & OpenAI:\n", + "\n", + "```bash\n", + "export OPENAI_API_KEY=YOUR_KEY\n", + "export TAVILY_API_KEY=YOUR_KEY\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### LangSmith\n", + "\n", + "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://smith.langchain.com/).\n", + "\n", + "Note that LangSmith is not needed, but it is helpful.
If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\n", + "\n", + "\n", + "```bash\n", + "export LANGCHAIN_TRACING_V2=true\n", + "export LANGCHAIN_API_KEY=YOUR_KEY\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Initial setup" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "import { TavilySearchAPIRetriever } from \"@langchain/community/retrievers/tavily_search_api\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({\n", + " modelName: \"gpt-3.5-turbo\",\n", + " temperature: 0,\n", + "});\n", + "const retriever = new TavilySearchAPIRetriever({\n", + " k: 6,\n", + "});\n", + "const prompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You're a helpful AI assistant. Given a user question and some web article snippets, answer the user question. If none of the articles answer the question, just say you don't know.\\n\\nHere are the web articles:{context}\"],\n", + " [\"human\", \"{question}\"],\n", + "])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we've got a model, retriever and prompt, let's chain them all together. We'll need to add some logic for formatting our retrieved `Document`s to a string that can be passed to our prompt. We'll make it so our chain returns both the answer and the retrieved Documents." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "import { Document } from \"@langchain/core/documents\";\n", + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "import { RunnableMap, RunnablePassthrough } from \"@langchain/core/runnables\";\n", + "\n", + "/**\n", + " * Format the documents into a readable string.\n", + " */\n", + "const formatDocs = (input: Record): string => {\n", + " const { docs } = input;\n", + " return \"\\n\\n\" + docs.map((doc: Document) => `Article title: ${doc.metadata.title}\\nArticle Snippet: ${doc.pageContent}`).join(\"\\n\\n\");\n", + "}\n", + "// subchain for generating an answer once we've done retrieval\n", + "const answerChain = prompt.pipe(llm).pipe(new StringOutputParser());\n", + "const map = RunnableMap.from({\n", + " question: new RunnablePassthrough(),\n", + " docs: retriever,\n", + "})\n", + "// complete chain that calls the retriever -> formats docs to string -> runs answer subchain -> returns just the answer and retrieved docs.\n", + "const chain = map.assign({ context: formatDocs }).assign({ answer: answerChain }).pick([\"answer\", \"docs\"])" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " answer: \u001b[32m\"Cheetahs are capable of reaching speeds as high as 75 mph or 120 km/h. Their average speed, however,\"\u001b[39m... 29 more characters,\n", + " docs: [\n", + " Document {\n", + " pageContent: \u001b[32m\"Now, their only hope lies in the hands of human conservationists, working tirelessly to save the che\"\u001b[39m... 880 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"How Fast Are Cheetahs, and Other Fascinating Facts About the World's ...\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.discovermagazine.com/planet-earth/how-fast-are-cheetahs-and-other-fascinating-facts-abou\"\u001b[39m... 
21 more characters,\n", + " score: \u001b[33m0.93715\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"If a lion comes along, the cheetah will abandon its catch -- it can't fight off a lion, and chances \"\u001b[39m... 911 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"What makes a cheetah run so fast? | HowStuffWorks\"\u001b[39m,\n", + " source: \u001b[32m\"https://animals.howstuffworks.com/mammals/cheetah-speed.htm\"\u001b[39m,\n", + " score: \u001b[33m0.93412\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"The science of cheetah speed\\n\"\u001b[39m +\n", + " \u001b[32m\"The cheetah (Acinonyx jubatus) is the fastest land animal on Earth, cap\"\u001b[39m... 738 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"How Fast Can a Cheetah Run? - ThoughtCo\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.thoughtco.com/how-fast-can-a-cheetah-run-4587031\"\u001b[39m,\n", + " score: \u001b[33m0.93134\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"One of two videos from National Geographic's award-winning multimedia coverage of cheetahs in the ma\"\u001b[39m... 60 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"The Science of a Cheetah's Speed | National Geographic\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.youtube.com/watch?v=icFMTB0Pi0g\"\u001b[39m,\n", + " score: \u001b[33m0.93109\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Contact Us − +\\n\"\u001b[39m +\n", + " \u001b[32m\"Address\\n\"\u001b[39m +\n", + " \u001b[32m\"Smithsonian's National Zoo & Conservation Biology Institute  3001 Connecticut\"\u001b[39m... 
1343 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"Cheetah | Smithsonian's National Zoo and Conservation Biology Institute\"\u001b[39m,\n", + " source: \u001b[32m\"https://nationalzoo.si.edu/animals/cheetah\"\u001b[39m,\n", + " score: \u001b[33m0.92938\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Threats to the Cheetah’s Reign\\n\"\u001b[39m +\n", + " \u001b[32m\"As unparalleled as the cheetah’s speed might be, they face numerous c\"\u001b[39m... 907 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"How Fast Can a Cheetah Run? The Secrets Behind Its Incredible Speed\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.explorationjunkie.com/how-fast-can-a-cheetah-run/\"\u001b[39m,\n", + " score: \u001b[33m0.871\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " }\n", + " ]\n", + "}" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await chain.invoke(\"How fast are cheetahs?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "LangSmith trace [here](https://smith.langchain.com/public/bb0ed37e-b2be-4ae9-8b0d-ce2aff0b4b5e/r)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Function-calling\n", + "\n", + "### Cite documents\n", + "Let's try using [OpenAI function-calling](/docs/modules/model_io/chat/function_calling) to make the model specify which of the provided documents it's actually referencing when answering. 
LangChain has some utils for converting objects or zod objects to the JSONSchema format expected by OpenAI, so we'll use that to define our functions:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "import { z } from \"zod\";\n", + "import { StructuredTool } from \"@langchain/core/tools\";\n", + "import { formatToOpenAITool } from \"@langchain/openai\";\n", + "\n", + "class CitedAnswer extends StructuredTool {\n", + " name = \"cited_answer\";\n", + " \n", + " description = \"Answer the user question based only on the given sources, and cite the sources used.\";\n", + "\n", + " schema = z.object({\n", + " answer: z.string().describe(\"The answer to the user question, which is based only on the given sources.\"),\n", + " citations: z.array(z.number()).describe(\"The integer IDs of the SPECIFIC sources which justify the answer.\")\n", + " });\n", + "\n", + " constructor() {\n", + " super();\n", + " }\n", + "\n", + " _call(input: z.infer): Promise {\n", + " return Promise.resolve(JSON.stringify(input, null, 2));\n", + " }\n", + "}\n", + "\n", + "const asOpenAITool = formatToOpenAITool(new CitedAnswer());\n", + "const tools1 = [asOpenAITool];" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's see what the model output is like when we pass in our functions and a user input:" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "AIMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m\"\"\u001b[39m,\n", + " additional_kwargs: {\n", + " function_call: \u001b[90mundefined\u001b[39m,\n", + " tool_calls: [\n", + " {\n", + " id: \u001b[32m\"call_WzPoDCIRQ1pCah8k93cVrqex\"\u001b[39m,\n", + " type: \u001b[32m\"function\"\u001b[39m,\n", + " function: \u001b[36m[Object]\u001b[39m\n", + " }\n", + " ]\n", + " }\n", + " },\n", + " lc_namespace: [ 
\u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"\"\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {\n", + " function_call: \u001b[90mundefined\u001b[39m,\n", + " tool_calls: [\n", + " {\n", + " id: \u001b[32m\"call_WzPoDCIRQ1pCah8k93cVrqex\"\u001b[39m,\n", + " type: \u001b[32m\"function\"\u001b[39m,\n", + " function: {\n", + " name: \u001b[32m\"cited_answer\"\u001b[39m,\n", + " arguments: \u001b[32m\"{\\n\"\u001b[39m +\n", + " \u001b[32m` \"answer\": \"Brian's height is 6'2\\\\\" - 3 inches\",\\n`\u001b[39m +\n", + " \u001b[32m' \"citations\": [1, 3]\\n'\u001b[39m +\n", + " \u001b[32m\"}\"\u001b[39m\n", + " }\n", + " }\n", + " ]\n", + " }\n", + "}" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const llmWithTool1 = llm.bind({\n", + " tools: tools1,\n", + " tool_choice: asOpenAITool\n", + "});\n", + "\n", + "const exampleQ = `What Brian's height?\n", + "\n", + "Source: 1\n", + "Information: Suzy is 6'2\"\n", + "\n", + "Source: 2\n", + "Information: Jeremiah is blonde\n", + "\n", + "Source: 3\n", + "Information: Brian is 3 inches shorted than Suzy`;\n", + "\n", + "await llmWithTool1.invoke(exampleQ);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "LangSmith trace [here](https://smith.langchain.com/public/34441213-cbb9-4775-a67e-2294aa1ccf69/r)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll add an output parser to convert the OpenAI API response to a nice object. 
We use the [JsonOutputKeyToolsParser](https://api.js.langchain.com/classes/langchain_output_parsers.JsonOutputKeyToolsParser.html) for this:" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{ answer: \u001b[32m`Brian's height is 6'2\" - 3 inches`\u001b[39m, citations: [ \u001b[33m1\u001b[39m, \u001b[33m3\u001b[39m ] }" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { JsonOutputKeyToolsParser } from \"langchain/output_parsers\";\n", + "\n", + "const outputParser = new JsonOutputKeyToolsParser({ keyName: \"cited_answer\", returnSingle: true });\n", + "\n", + "await llmWithTool1.pipe(outputParser).invoke(exampleQ);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "LangSmith trace [here](https://smith.langchain.com/public/1a045c25-ec5c-49f5-9756-6022edfea6af/r)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we're ready to put together our chain" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "import { Document } from \"@langchain/core/documents\";\n", + "\n", + "const formatDocsWithId = (docs: Array): string => {\n", + " return \"\\n\\n\" + docs.map((doc: Document, idx: number) => `Source ID: ${idx}\\nArticle title: ${doc.metadata.title}\\nArticle Snippet: ${doc.pageContent}`).join(\"\\n\\n\");\n", + "}\n", + "// subchain for generating an answer once we've done retrieval\n", + "const answerChain1 = prompt.pipe(llmWithTool1).pipe(outputParser);\n", + "const map1 = RunnableMap.from({\n", + " question: new RunnablePassthrough(),\n", + " docs: retriever,\n", + "})\n", + "// complete chain that calls the retriever -> formats docs to string -> runs answer subchain -> returns just the answer and retrieved docs.\n", + "const chain1 = map1\n", + " .assign({ context: (input: { docs: Array }) => 
formatDocsWithId(input.docs) })\n", + " .assign({ cited_answer: answerChain1 })\n", + " .pick([\"cited_answer\", \"docs\"])" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " cited_answer: {\n", + " answer: \u001b[32m\"Cheetahs can reach speeds of up to 75 mph (120 km/h).\"\u001b[39m,\n", + " citations: [ \u001b[33m3\u001b[39m ]\n", + " },\n", + " docs: [\n", + " Document {\n", + " pageContent: \u001b[32m\"The speeds attained by the cheetah may be only slightly greater than those achieved by the pronghorn\"\u001b[39m... 2527 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"Cheetah - Wikipedia\"\u001b[39m,\n", + " source: \u001b[32m\"https://en.wikipedia.org/wiki/Cheetah\"\u001b[39m,\n", + " score: \u001b[33m0.97773\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Contact Us − +\\n\"\u001b[39m +\n", + " \u001b[32m\"Address\\n\"\u001b[39m +\n", + " \u001b[32m\"Smithsonian's National Zoo & Conservation Biology Institute  3001 Connecticut\"\u001b[39m... 1343 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"Cheetah | Smithsonian's National Zoo and Conservation Biology Institute\"\u001b[39m,\n", + " source: \u001b[32m\"https://nationalzoo.si.edu/animals/cheetah\"\u001b[39m,\n", + " score: \u001b[33m0.9681\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"The maximum speed cheetahs have been measured at is 114 km (71 miles) per hour, and they routinely r\"\u001b[39m... 
1048 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"Cheetah | Description, Speed, Habitat, Diet, Cubs, & Facts\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.britannica.com/animal/cheetah-mammal\"\u001b[39m,\n", + " score: \u001b[33m0.9459\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"The science of cheetah speed\\n\"\u001b[39m +\n", + " \u001b[32m\"The cheetah (Acinonyx jubatus) is the fastest land animal on Earth, cap\"\u001b[39m... 738 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"How Fast Can a Cheetah Run? - ThoughtCo\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.thoughtco.com/how-fast-can-a-cheetah-run-4587031\"\u001b[39m,\n", + " score: \u001b[33m0.93957\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"One of two videos from National Geographic's award-winning multimedia coverage of cheetahs in the ma\"\u001b[39m... 60 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"The Science of a Cheetah's Speed | National Geographic\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.youtube.com/watch?v=icFMTB0Pi0g\"\u001b[39m,\n", + " score: \u001b[33m0.92814\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"If a lion comes along, the cheetah will abandon its catch -- it can't fight off a lion, and chances \"\u001b[39m... 911 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"What makes a cheetah run so fast? 
| HowStuffWorks\"\u001b[39m,\n", + " source: \u001b[32m\"https://animals.howstuffworks.com/mammals/cheetah-speed.htm\"\u001b[39m,\n", + " score: \u001b[33m0.85762\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " }\n", + " ]\n", + "}" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await chain1.invoke(\"How fast are cheetahs?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "LangSmith trace [here](https://smith.langchain.com/public/2a29cfd6-89fa-45bb-9b2a-f730e81061c2/r)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Cite snippets\n", + "\n", + "What if we want to cite actual text spans? We can try to get our model to return these, too.\n", + "\n", + "*Aside: Note that if we break up our documents so that we have many documents with only a sentence or two instead of a few long documents, citing documents becomes roughly equivalent to citing snippets, and may be easier for the model because the model just needs to return an identifier for each snippet instead of the actual text. 
Probably worth trying both approaches and evaluating.*" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "const citationSchema = z.object({\n", + " sourceId: z.number().describe(\"The integer ID of a SPECIFIC source which justifies the answer.\"),\n", + " quote: z.string().describe(\"The VERBATIM quote from the specified source that justifies the answer.\")\n", + "})\n", + "\n", + "class QuotedAnswer extends StructuredTool {\n", + " name = \"quoted_answer\";\n", + " \n", + " description = \"Answer the user question based only on the given sources, and cite the sources used.\";\n", + "\n", + " schema = z.object({\n", + " answer: z.string().describe(\"The answer to the user question, which is based only on the given sources.\"),\n", + " citations: z.array(citationSchema).describe(\"Citations from the given sources that justify the answer.\")\n", + " });\n", + "\n", + " constructor() {\n", + " super();\n", + " }\n", + "\n", + " _call(input: z.infer): Promise {\n", + " return Promise.resolve(JSON.stringify(input, null, 2));\n", + " }\n", + "}\n", + "\n", + "const quotedAnswerTool = formatToOpenAITool(new QuotedAnswer());\n", + "const tools2 = [quotedAnswerTool];" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "import { Document } from \"@langchain/core/documents\";\n", + "\n", + "const outputParser2 = new JsonOutputKeyToolsParser({ keyName: \"quoted_answer\", returnSingle: true });\n", + "const llmWithTool2 = llm.bind({\n", + " tools: tools2,\n", + " tool_choice: quotedAnswerTool,\n", + "});\n", + "const answerChain2 = prompt.pipe(llmWithTool2).pipe(outputParser2);\n", + "const map2 = RunnableMap.from({\n", + " question: new RunnablePassthrough(),\n", + " docs: retriever,\n", + "})\n", + "// complete chain that calls the retriever -> formats docs to string -> runs answer subchain -> returns just the answer and retrieved docs.\n", + 
"const chain2 = map2\n", + " .assign({ context: (input: { docs: Array }) => formatDocsWithId(input.docs) })\n", + " .assign({ quoted_answer: answerChain2 })\n", + " .pick([\"quoted_answer\", \"docs\"]);" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " quoted_answer: {\n", + " answer: \u001b[32m\"Cheetahs can reach speeds of up to 70 mph.\"\u001b[39m,\n", + " citations: [\n", + " {\n", + " sourceId: \u001b[33m0\u001b[39m,\n", + " quote: \u001b[32m\"We’ve mentioned that these guys can reach speeds of up to 70 mph\"\u001b[39m\n", + " },\n", + " {\n", + " sourceId: \u001b[33m2\u001b[39m,\n", + " quote: \u001b[32m\"The maximum speed cheetahs have been measured at is 114 km (71 miles) per hour, and they routinely r\"\u001b[39m... 72 more characters\n", + " },\n", + " {\n", + " sourceId: \u001b[33m5\u001b[39m,\n", + " quote: \u001b[32m\"Cheetahs—the fastest land mammals on the planet—are able to reach speeds of up to 70 mph\"\u001b[39m\n", + " }\n", + " ]\n", + " },\n", + " docs: [\n", + " Document {\n", + " pageContent: \u001b[32m\"They are surprisingly graceful\\n\"\u001b[39m +\n", + " \u001b[32m\"Cheetahs are very lithe-they move quickly and full-grown adults weigh\"\u001b[39m... 824 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"How Fast Are Cheetahs - Proud Animal\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.proudanimal.com/2024/01/27/fast-cheetahs/\"\u001b[39m,\n", + " score: \u001b[33m0.97272\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"The Science of Speed\\n\"\u001b[39m +\n", + " \u001b[32m\"Instead, previous research has shown that the fastest animals are not the large\"\u001b[39m... 
743 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"Now Scientists Can Accurately Guess The Speed Of Any Animal\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.nationalgeographic.com/animals/article/Animal-speed-size-cheetahs\"\u001b[39m,\n", + " score: \u001b[33m0.96532\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"The maximum speed cheetahs have been measured at is 114 km (71 miles) per hour, and they routinely r\"\u001b[39m... 1048 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"Cheetah | Description, Speed, Habitat, Diet, Cubs, & Facts\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.britannica.com/animal/cheetah-mammal\"\u001b[39m,\n", + " score: \u001b[33m0.95122\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Now, their only hope lies in the hands of human conservationists, working tirelessly to save the che\"\u001b[39m... 880 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"How Fast Are Cheetahs, and Other Fascinating Facts About the World's ...\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.discovermagazine.com/planet-earth/how-fast-are-cheetahs-and-other-fascinating-facts-abou\"\u001b[39m... 21 more characters,\n", + " score: \u001b[33m0.92667\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Contact Us − +\\n\"\u001b[39m +\n", + " \u001b[32m\"Address\\n\"\u001b[39m +\n", + " \u001b[32m\"Smithsonian's National Zoo & Conservation Biology Institute  3001 Connecticut\"\u001b[39m... 
1343 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"Cheetah | Smithsonian's National Zoo and Conservation Biology Institute\"\u001b[39m,\n", + " source: \u001b[32m\"https://nationalzoo.si.edu/animals/cheetah\"\u001b[39m,\n", + " score: \u001b[33m0.91253\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Cheetahs—the fastest land mammals on the planet—are incredible creatures. They're able to reach spee\"\u001b[39m... 95 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"Amazing Cheetah Facts | How Fast is a Cheetah? - Popular Mechanics\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.popularmechanics.com/science/animals/g30021998/facts-about-cheetahs/\"\u001b[39m,\n", + " score: \u001b[33m0.87489\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " }\n", + " ]\n", + "}" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await chain2.invoke(\"How fast are cheetahs?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "LangSmith trace [here](https://smith.langchain.com/public/2a032bc5-5b04-4dc3-8d85-49e5ec7e0157/r)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Direct prompting\n", + "\n", + "Most models don't yet support function-calling. We can achieve similar results with direct prompting. 
Let's see what this looks like using an Anthropic chat model that is particularly proficient in working with XML:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Setup\n", + "\n", + "Install the LangChain Anthropic integration package:\n", + "\n", + "```bash\n", + "npm install @langchain/anthropic\n", + "```\n", + "\n", + "Add your Anthropic API key to your environment:\n", + "\n", + "```bash\n", + "export ANTHROPIC_API_KEY=YOUR_KEY\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatAnthropic } from \"@langchain/anthropic\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const anthropic = new ChatAnthropic({\n", + " modelName: \"claude-instant-1.2\",\n", + "});\n", + "const system = `You're a helpful AI assistant. Given a user question and some web article snippets,\n", + "answer the user question and provide citations. If none of the articles answer the question, just say you don't know.\n", + "\n", + "Remember, you must return both an answer and citations. A citation consists of a VERBATIM quote that\n", + "justifies the answer and the ID of the quote article. Return a citation for every quote across all articles\n", + "that justify the answer. 
Use the following format for your final output:\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " ...\n", + " \n", + "\n", + "\n", + "Here are the web articles:{context}`;\n", + "\n", + "const anthropicPrompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", system],\n", + " [\"human\", \"{question}\"]\n", + "]);" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "import { XMLOutputParser } from \"@langchain/core/output_parsers\";\n", + "import { Document } from \"@langchain/core/documents\";\n", + "import { RunnableLambda, RunnablePassthrough, RunnableMap } from \"@langchain/core/runnables\";\n", + "\n", + "const formatDocsToXML = (docs: Array): string => {\n", + " const formatted: Array = [];\n", + " docs.forEach((doc, idx) => {\n", + " const docStr = `\n", + " ${doc.metadata.title}\n", + " ${doc.pageContent}\n", + "`\n", + " formatted.push(docStr);\n", + " });\n", + " return `\\n\\n${formatted.join(\"\\n\")}`;\n", + "}\n", + "\n", + "const format3 = new RunnableLambda({\n", + " func: (input: { docs: Array }) => formatDocsToXML(input.docs)\n", + "})\n", + "const answerChain = anthropicPrompt\n", + " .pipe(anthropic)\n", + " .pipe(new XMLOutputParser())\n", + " .pipe(\n", + " new RunnableLambda({ func: (input: { cited_answer: any }) => input.cited_answer })\n", + " );\n", + "const map3 = RunnableMap.from({\n", + " question: new RunnablePassthrough(),\n", + " docs: retriever,\n", + "});\n", + "const chain3 = map3.assign({ context: format3 }).assign({ cited_answer: answerChain }).pick([\"cited_answer\", \"docs\"])" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " cited_answer: [\n", + " {\n", + " answer: \u001b[32m\"Cheetahs can reach top speeds of between 60 to 70 mph.\"\u001b[39m\n", + " },\n", + " {\n", + " citations: [\n", + " { citation: \u001b[36m[Array]\u001b[39m },\n", + " { citation: 
\u001b[36m[Array]\u001b[39m },\n", + " { citation: \u001b[36m[Array]\u001b[39m }\n", + " ]\n", + " }\n", + " ],\n", + " docs: [\n", + " Document {\n", + " pageContent: \u001b[32m\"A cheetah's muscular tail helps control their steering and keep their balance when running very fast\"\u001b[39m... 210 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"75 Amazing Cheetah Facts Your Kids Will Love (2024)\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.mkewithkids.com/post/cheetah-facts-for-kids/\"\u001b[39m,\n", + " score: \u001b[33m0.97081\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"The maximum speed cheetahs have been measured at is 114 km (71 miles) per hour, and they routinely r\"\u001b[39m... 1048 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"Cheetah | Description, Speed, Habitat, Diet, Cubs, & Facts\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.britannica.com/animal/cheetah-mammal\"\u001b[39m,\n", + " score: \u001b[33m0.96824\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"The Science of Speed\\n\"\u001b[39m +\n", + " \u001b[32m\"Instead, previous research has shown that the fastest animals are not the large\"\u001b[39m... 743 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"Now Scientists Can Accurately Guess The Speed Of Any Animal\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.nationalgeographic.com/animals/article/Animal-speed-size-cheetahs\"\u001b[39m,\n", + " score: \u001b[33m0.96237\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Contact Us − +\\n\"\u001b[39m +\n", + " \u001b[32m\"Address\\n\"\u001b[39m +\n", + " \u001b[32m\"Smithsonian's National Zoo & Conservation Biology Institute  3001 Connecticut\"\u001b[39m... 
1343 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"Cheetah | Smithsonian's National Zoo and Conservation Biology Institute\"\u001b[39m,\n", + " source: \u001b[32m\"https://nationalzoo.si.edu/animals/cheetah\"\u001b[39m,\n", + " score: \u001b[33m0.94565\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"They are surprisingly graceful\\n\"\u001b[39m +\n", + " \u001b[32m\"Cheetahs are very lithe-they move quickly and full-grown adults weigh\"\u001b[39m... 824 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"How Fast Are Cheetahs - Proud Animal\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.proudanimal.com/2024/01/27/fast-cheetahs/\"\u001b[39m,\n", + " score: \u001b[33m0.91795\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Cheetahs are the world's fastest land animal. They can reach a speed of 69.5 miles per hour in just \"\u001b[39m... 100 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"How fast is Tyreek Hill? 'The Cheetah' lives up to 40 time, Next Gen ...\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.sportingnews.com/us/nfl/news/fast-tyreek-hill-40-time-speed-chiefs/1cekgawhz39wr1tr472e4\"\u001b[39m... 
5 more characters,\n", + " score: \u001b[33m0.83505\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " }\n", + " ]\n", + "}" + ] + }, + "execution_count": 26, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await chain3.invoke(\"How fast are cheetahs?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "LangSmith trace [here](https://smith.langchain.com/public/bebd86f5-ae9c-49ea-bc26-69c4fdf195b1/r)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Retrieval post-processing\n", + "\n", + "Another approach is to post-process our retrieved documents to compress the content, so that the source content is already minimal enough that we don't need the model to cite specific sources or spans. For example, we could break up each document into a sentence or two, embed those and keep only the most relevant ones. LangChain has some built-in components for this. Here we'll use a [RecursiveCharacterTextSplitter](https://js.langchain.com/docs/modules/data_connection/document_transformers/recursive_text_splitter), which creates chunks of a specified size by splitting on separator substrings, and an [EmbeddingsFilter](https://js.langchain.com/docs/modules/data_connection/retrievers/contextual_compression#embeddingsfilter), which keeps only the texts with the most relevant embeddings." + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The maximum speed cheetahs have been measured at is 114 km (71 miles) per hour, and they routinely reach velocities of 80–100 km (50–62 miles) per hour while pursuing prey.\n", + "cheetah,\n", + "(Acinonyx jubatus), \n", + "\n", + "\n", + "The science of cheetah speed\n", + "The cheetah (Acinonyx jubatus) is the fastest land animal on Earth, capable of reaching speeds as high as 75 mph or 120 km/h. 
Cheetahs are predators that sneak up on their prey and sprint a short distance to chase and attack.\n", + " Key Takeaways: How Fast Can a Cheetah Run?\n", + "Fastest Cheetah on Earth \n", + "\n", + "\n", + "Built for speed, the cheetah can accelerate from zero to 45 in just 2.5 seconds and reach top speeds of 60 to 70 mph, making it the fastest land mammal! Fun Facts\n", + "Conservation Status\n", + "Cheetah News\n", + "Taxonomic Information\n", + "Animal News\n", + "NZCBI staff in Front Royal, Virginia, are mourning the loss of Walnut, a white-naped crane who became an internet sensation for choosing one of her keepers as her mate. \n", + "\n", + "\n", + "Scientists calculate a cheetah's top speed is 75 mph, but the fastest recorded speed is somewhat slower. The top 10 fastest animals are: \n", + "\n", + "\n", + "The pronghorn, an American animal resembling an antelope, is the fastest land animal in the Western Hemisphere. While a cheetah's top speed ranges from 65 to 75 mph (104 to 120 km/h), its average speed is only 40 mph (64 km/hr), punctuated by short bursts at its top speed. Basically, if a predator threatens to take a cheetah's kill or attack its young, a cheetah has to run. \n", + "\n", + "\n", + "A cheetah eats a variety of small animals, including game birds, rabbits, small antelopes (including the springbok, impala, and gazelle), young warthogs, and larger antelopes (such as the kudu, hartebeest, oryx, and roan). Their faces are distinguished by prominent black lines that curve from the inner corner of each eye to the outer corners of the mouth, like a well-worn trail of inky tears. 
\n", + "\n", + "\n", + "4 kg) Cheetah moms spend a lot of time teaching their cubs to chase, sometimes dragging live animals back to the den so the cubs can practice the chase-and-catch process \n", + "\n", + "\n", + "Advertisement If confronted, a roughly 125-pound cheetah will always run rather than fight -- it's too weak, light and thin to have any chance against something like a lion, which can be twice as long as a cheetah and weigh more than 400 pounds (181 \n", + "\n", + "\n", + "Cheetahs eat a variety of small animals, including game birds, rabbits, small antelopes (including the springbok, impala, and gazelle), young warthogs, and larger antelopes (such as the kudu, hartebeest, oryx, and roan) \n", + "\n", + "\n", + "Historically, cheetahs ranged widely throughout Africa and Asia, from the Cape of Good Hope to the Mediterranean, throughout the Arabian Peninsula and the Middle East, from Israel, India and Pakistan north to the northern shores of the Caspian and Aral Seas, and west through Uzbekistan, Turkmenistan, Afghanistan, and Pakistan into central India. 
Header Links \n", + "\n", + "\n" + ] + } + ], + "source": [ + "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", + "import { EmbeddingsFilter } from \"langchain/retrievers/document_compressors/embeddings_filter\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "import { DocumentInterface } from \"@langchain/core/documents\";\n", + "import { RunnableMap, RunnablePassthrough } from \"@langchain/core/runnables\";\n", + "\n", + "const splitter = new RecursiveCharacterTextSplitter({\n", + " chunkSize: 400,\n", + " chunkOverlap: 0,\n", + " separators: [\"\\n\\n\", \"\\n\", \".\", \" \"],\n", + " keepSeparator: false,\n", + "});\n", + "\n", + "const compressor = new EmbeddingsFilter({\n", + " embeddings: new OpenAIEmbeddings(),\n", + " k: 10,\n", + "});\n", + "\n", + "const splitAndFilter = async (input): Promise> => {\n", + " const { docs, question } = input;\n", + " const splitDocs = await splitter.splitDocuments(docs);\n", + " const statefulDocs = await compressor.compressDocuments(splitDocs, question);\n", + " return statefulDocs;\n", + "};\n", + "\n", + "const retrieveMap = RunnableMap.from({\n", + " question: new RunnablePassthrough(),\n", + " docs: retriever,\n", + "});\n", + "\n", + "const retrieve = retrieveMap.pipe(splitAndFilter);\n", + "const docs = await retrieve.invoke(\"How fast are cheetahs?\");\n", + "for (const doc of docs) {\n", + " console.log(doc.pageContent, \"\\n\\n\");\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "LangSmith trace [here](https://smith.langchain.com/public/1bb61806-7d09-463d-909a-a7da410e79d4/r)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "const chain4 = retrieveMap\n", + " .assign({ context: formatDocs })\n", + " .assign({ answer: answerChain })\n", + " .pick([\"answer\", \"docs\"]);" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": 
[ + { + "data": { + "text/plain": [ + "{\n", + " answer: [\n", + " {\n", + " answer: \u001b[32m\"\\n\"\u001b[39m +\n", + " \u001b[32m\"Cheetahs are the fastest land animals. They can reach top speeds of around 75 mph (120 km/h) and ro\"\u001b[39m... 74 more characters\n", + " },\n", + " { citations: [ { citation: \u001b[36m[Array]\u001b[39m }, { citation: \u001b[36m[Array]\u001b[39m } ] }\n", + " ],\n", + " docs: [\n", + " Document {\n", + " pageContent: \u001b[32m\"The maximum speed cheetahs have been measured at is 114 km (71 miles) per hour, and they routinely r\"\u001b[39m... 1048 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"cheetah - Encyclopedia Britannica | Britannica\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.britannica.com/animal/cheetah-mammal\"\u001b[39m,\n", + " score: \u001b[33m0.97059\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Contact Us − +\\n\"\u001b[39m +\n", + " \u001b[32m\"Address\\n\"\u001b[39m +\n", + " \u001b[32m\"Smithsonian's National Zoo & Conservation Biology Institute  3001 Connecticut\"\u001b[39m... 1343 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"Cheetah\"\u001b[39m,\n", + " source: \u001b[32m\"https://nationalzoo.si.edu/animals/cheetah\"\u001b[39m,\n", + " score: \u001b[33m0.95102\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"The science of cheetah speed\\n\"\u001b[39m +\n", + " \u001b[32m\"The cheetah (Acinonyx jubatus) is the fastest land animal on Earth, cap\"\u001b[39m... 
738 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"How Fast Can a Cheetah Run?\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.thoughtco.com/how-fast-can-a-cheetah-run-4587031\"\u001b[39m,\n", + " score: \u001b[33m0.94974\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Now, their only hope lies in the hands of human conservationists, working tirelessly to save the che\"\u001b[39m... 880 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"How Fast Are Cheetahs, and Other Fascinating Facts About the World's ...\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.discovermagazine.com/planet-earth/how-fast-are-cheetahs-and-other-fascinating-facts-abou\"\u001b[39m... 21 more characters,\n", + " score: \u001b[33m0.92695\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"One of two videos from National Geographic's award-winning multimedia coverage of cheetahs in the ma\"\u001b[39m... 60 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"The Science of a Cheetah's Speed | National Geographic\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.youtube.com/watch?v=icFMTB0Pi0g\"\u001b[39m,\n", + " score: \u001b[33m0.90754\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"The speeds attained by the cheetah may be only slightly greater than those achieved by the pronghorn\"\u001b[39m... 
2527 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"Cheetah - Wikipedia\"\u001b[39m,\n", + " source: \u001b[32m\"https://en.wikipedia.org/wiki/Cheetah\"\u001b[39m,\n", + " score: \u001b[33m0.89476\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " }\n", + " ]\n", + "}" + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "// Note the documents have an article \"summary\" in the metadata that is now much longer than the\n", + "// actual document page content. This summary isn't actually passed to the model.\n", + "await chain4.invoke(\"How fast are cheetahs?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "LangSmith trace [here](https://smith.langchain.com/public/f93302e6-a31b-454e-9fc7-94fb4a931a9d/r)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Generation post-processing\n", + "\n", + "Another approach is to post-process our model generation. In this example we'll first generate just an answer, and then we'll ask the model to annotate it's own answer with citations. The downside of this approach is of course that it is slower and more expensive, because two model calls need to be made.\n", + "\n", + "Let's apply this to our initial chain." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "import { StructuredTool } from \"@langchain/core/tools\";\n", + "import { formatToOpenAITool } from \"@langchain/openai\";\n", + "import { z } from \"zod\";\n", + "\n", + "class AnnotatedAnswer extends StructuredTool {\n", + " name = \"annotated_answer\";\n", + "\n", + " description = \"Annotate the answer to the user question with quote citations that justify the answer\";\n", + "\n", + " schema = z.object({\n", + " citations: z.array(citationSchema).describe(\"Citations from the given sources that justify the answer.\"),\n", + " })\n", + "\n", + " _call(input: z.infer): Promise {\n", + " return Promise.resolve(JSON.stringify(input, null, 2));\n", + " }\n", + "}\n", + "\n", + "const annotatedAnswerTool = formatToOpenAITool(new AnnotatedAnswer());\n", + "\n", + "const llmWithTools5 = llm.bind({\n", + " tools: [annotatedAnswerTool],\n", + " tool_choice: annotatedAnswerTool,\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n", + "import { RunnableSequence } from \"@langchain/core/runnables\";\n", + "import { JsonOutputKeyToolsParser } from \"langchain/output_parsers\";\n", + "import { RunnableMap, RunnablePassthrough } from \"@langchain/core/runnables\";\n", + "import { AIMessage, ToolMessage } from \"@langchain/core/messages\";\n", + "\n", + "const prompt5 = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You're a helpful AI assistant. Given a user question and some web article snippets, answer the user question. 
If none of the articles answer the question, just say you don't know.\\n\\nHere are the web articles:{context}\"],\n", + " [\"human\", \"{question}\"],\n", + " new MessagesPlaceholder({\n", + " variableName: \"chat_history\",\n", + " optional: true,\n", + " }),\n", + " new MessagesPlaceholder({\n", + " variableName: \"toolMessage\",\n", + " optional: true,\n", + " })\n", + "]);\n", + "\n", + "const answerChain5 = prompt5.pipe(llmWithTools5);\n", + "const annotationChain = RunnableSequence.from([\n", + " prompt5,\n", + " llmWithTools5,\n", + " new JsonOutputKeyToolsParser({ keyName: \"annotated_answer\", returnSingle: true }),\n", + " (input: any) => input.citations\n", + "]);\n", + "const map5 = RunnableMap.from({\n", + " question: new RunnablePassthrough(),\n", + " docs: retriever,\n", + "});\n", + "const chain5 = map5\n", + " .assign({ context: formatDocs })\n", + " .assign({ aiMessage: answerChain5 })\n", + " .assign({\n", + " chat_history: (input) => input.aiMessage,\n", + " toolMessage: (input) => new ToolMessage({\n", + " tool_call_id: input.aiMessage.additional_kwargs.tool_calls[0].id,\n", + " content: input.aiMessage.additional_kwargs.content ?? \"\",\n", + " })\n", + " })\n", + " .assign({\n", + " annotations: annotationChain,\n", + " })\n", + " .pick([\"answer\", \"docs\", \"annotations\"]);" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " docs: [\n", + " Document {\n", + " pageContent: \u001b[32m\"They are surprisingly graceful\\n\"\u001b[39m +\n", + " \u001b[32m\"Cheetahs are very lithe-they move quickly and full-grown adults weigh\"\u001b[39m... 
824 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"How Fast Are Cheetahs - Proud Animal\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.proudanimal.com/2024/01/27/fast-cheetahs/\"\u001b[39m,\n", + " score: \u001b[33m0.96021\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Contact Us − +\\n\"\u001b[39m +\n", + " \u001b[32m\"Address\\n\"\u001b[39m +\n", + " \u001b[32m\"Smithsonian's National Zoo & Conservation Biology Institute  3001 Connecticut\"\u001b[39m... 1343 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"Cheetah | Smithsonian's National Zoo and Conservation Biology Institute\"\u001b[39m,\n", + " source: \u001b[32m\"https://nationalzoo.si.edu/animals/cheetah\"\u001b[39m,\n", + " score: \u001b[33m0.94798\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"The science of cheetah speed\\n\"\u001b[39m +\n", + " \u001b[32m\"The cheetah (Acinonyx jubatus) is the fastest land animal on Earth, cap\"\u001b[39m... 738 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"How Fast Can a Cheetah Run? - ThoughtCo\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.thoughtco.com/how-fast-can-a-cheetah-run-4587031\"\u001b[39m,\n", + " score: \u001b[33m0.92591\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"The maximum speed cheetahs have been measured at is 114 km (71 miles) per hour, and they routinely r\"\u001b[39m... 
1048 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"Cheetah | Description, Speed, Habitat, Diet, Cubs, & Facts\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.britannica.com/animal/cheetah-mammal\"\u001b[39m,\n", + " score: \u001b[33m0.90128\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"The Science of Speed\\n\"\u001b[39m +\n", + " \u001b[32m\"Instead, previous research has shown that the fastest animals are not the large\"\u001b[39m... 743 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"Now Scientists Can Accurately Guess The Speed Of Any Animal\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.nationalgeographic.com/animals/article/Animal-speed-size-cheetahs\"\u001b[39m,\n", + " score: \u001b[33m0.90097\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Now, their only hope lies in the hands of human conservationists, working tirelessly to save the che\"\u001b[39m... 880 more characters,\n", + " metadata: {\n", + " title: \u001b[32m\"How Fast Are Cheetahs, and Other Fascinating Facts About the World's ...\"\u001b[39m,\n", + " source: \u001b[32m\"https://www.discovermagazine.com/planet-earth/how-fast-are-cheetahs-and-other-fascinating-facts-abou\"\u001b[39m... 21 more characters,\n", + " score: \u001b[33m0.89788\u001b[39m,\n", + " images: \u001b[1mnull\u001b[22m\n", + " }\n", + " }\n", + " ],\n", + " annotations: [\n", + " {\n", + " sourceId: \u001b[33m0\u001b[39m,\n", + " quote: \u001b[32m\"We’ve mentioned that these guys can reach speeds of up to 70 mph, but did you know they can go from \"\u001b[39m... 22 more characters\n", + " },\n", + " {\n", + " sourceId: \u001b[33m1\u001b[39m,\n", + " quote: \u001b[32m\"Built for speed, the cheetah can accelerate from zero to 45 in just 2.5 seconds and reach top speeds\"\u001b[39m... 
52 more characters\n", + " },\n", + " {\n", + " sourceId: \u001b[33m2\u001b[39m,\n", + " quote: \u001b[32m\"The maximum speed cheetahs have been measured at is 114 km (71 miles) per hour, and they routinely r\"\u001b[39m... 72 more characters\n", + " }\n", + " ]\n", + "}" + ] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await chain5.invoke(\"How fast are cheetahs?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "LangSmith trace [here](https://smith.langchain.com/public/f4ca647d-b43d-49ba-8df5-65a9761f712e/r)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/core_docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb b/docs/core_docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb new file mode 100644 index 000000000000..68365432bd78 --- /dev/null +++ b/docs/core_docs/docs/use_cases/question_answering/conversational_retrieval_agents.ipynb @@ -0,0 +1,457 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Using agents\n", + "\n", + "This is an agent specifically optimized for doing retrieval when necessary and also holding a conversation.\n", + "\n", + "To start, we will set up the retriever we want to use, and then turn it into a retriever tool. Next, we will use the high level constructor for this type of agent. Finally, we will walk through how to construct a conversational retrieval agent from components." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup\n", + "### Dependencies\n", + "\n", + "We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/modules/model_io/chat) or [LLM](/docs/modules/model_io/llms), [Embeddings](/docs/modules/data_connection/text_embedding/), and [VectorStore](/docs/modules/data_connection/vectorstores/) or [Retriever](/docs/modules/data_connection/retrievers/).\n", + "\n", + "We’ll use the following packages:\n", + "\n", + "```bash\n", + "npm install --save langchain @langchain/openai\n", + "```\n", + "\n", + "We need to set our environment variable for OpenAI:\n", + "\n", + "```bash\n", + "export OPENAI_API_KEY=YOUR_KEY\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### LangSmith\n", + "\n", + "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://smith.langchain.com/).\n", + "\n", + "Note that LangSmith is not needed, but it is helpful. If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\n", + "\n", + "\n", + "```bash\n", + "export LANGCHAIN_TRACING_V2=true\n", + "export LANGCHAIN_API_KEY=YOUR_KEY\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The Retriever\n", + "\n", + "To start, we need a retriever to use! The code here is mostly just example code. Feel free to use your own retriever and skip to the section on creating a retriever tool." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import { TextLoader } from \"langchain/document_loaders/fs/text\";\n", + "\n", + "const loader = new TextLoader(\"../../../../../examples/state_of_the_union.txt\");\n", + "const documents = await loader.load();" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "texts.length 41\n" + ] + } + ], + "source": [ + "import { CharacterTextSplitter } from \"langchain/text_splitter\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const textSplitter = new CharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 0 });\n", + "const texts = await textSplitter.splitDocuments(documents);\n", + "console.log(\"texts.length\", texts.length);\n", + "const embeddings = new OpenAIEmbeddings();\n", + "const db = await MemoryVectorStore.fromDocuments(texts, embeddings);" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "const retriever = db.asRetriever();" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Retriever Tool\n", + "\n", + "Now we need to create a tool for our retriever. The main things we need to pass in are a name for the retriever as well as a description. These will both be used by the language model, so they should be informative." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "import { createRetrieverTool } from \"langchain/tools/retriever\";\n", + "\n", + "const tool = createRetrieverTool(retriever, {\n", + " name: \"search_state_of_union\",\n", + " description:\n", + " \"Searches and returns excerpts from the 2022 State of the Union.\",\n", + "});\n", + "const tools = [tool];" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Agent Constructor\n", + "\n", + "Here, we will use the high level `createOpenaiToolsAgent` API to construct the agent.\n", + "\n", + "Notice that beside the list of tools, the only thing we need to pass in is a language model to use.\n", + "Under the hood, this agent is using the OpenAI tool-calling capabilities, so we need to use a ChatOpenAI model." + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[\n", + " SystemMessagePromptTemplate {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " prompt: PromptTemplate {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " template: \u001b[32m\"You are a helpful assistant\"\u001b[39m,\n", + " inputVariables: [],\n", + " templateFormat: \u001b[32m\"f-string\"\u001b[39m,\n", + " partialVariables: {}\n", + " },\n", + " lc_runnable: \u001b[33mtrue\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"prompts\"\u001b[39m, \u001b[32m\"prompt\"\u001b[39m ],\n", + " inputVariables: [],\n", + " outputParser: \u001b[90mundefined\u001b[39m,\n", + " partialVariables: {},\n", + " template: \u001b[32m\"You are a helpful assistant\"\u001b[39m,\n", + " templateFormat: \u001b[32m\"f-string\"\u001b[39m,\n", + " validateTemplate: \u001b[33mtrue\u001b[39m\n", + " }\n", + " },\n", + " lc_runnable: \u001b[33mtrue\u001b[39m,\n", + " name: 
\u001b[90mundefined\u001b[39m,\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"prompts\"\u001b[39m, \u001b[32m\"chat\"\u001b[39m ],\n", + " prompt: PromptTemplate {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " template: \u001b[32m\"You are a helpful assistant\"\u001b[39m,\n", + " inputVariables: [],\n", + " templateFormat: \u001b[32m\"f-string\"\u001b[39m,\n", + " partialVariables: {}\n", + " },\n", + " lc_runnable: \u001b[33mtrue\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"prompts\"\u001b[39m, \u001b[32m\"prompt\"\u001b[39m ],\n", + " inputVariables: [],\n", + " outputParser: \u001b[90mundefined\u001b[39m,\n", + " partialVariables: {},\n", + " template: \u001b[32m\"You are a helpful assistant\"\u001b[39m,\n", + " templateFormat: \u001b[32m\"f-string\"\u001b[39m,\n", + " validateTemplate: \u001b[33mtrue\u001b[39m\n", + " }\n", + " },\n", + " MessagesPlaceholder {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: { optional: \u001b[33mtrue\u001b[39m, variableName: \u001b[32m\"chat_history\"\u001b[39m },\n", + " lc_runnable: \u001b[33mtrue\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"prompts\"\u001b[39m, \u001b[32m\"chat\"\u001b[39m ],\n", + " variableName: \u001b[32m\"chat_history\"\u001b[39m,\n", + " optional: \u001b[33mtrue\u001b[39m\n", + " },\n", + " HumanMessagePromptTemplate {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " prompt: PromptTemplate {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " template: \u001b[32m\"{input}\"\u001b[39m,\n", + " inputVariables: \u001b[36m[Array]\u001b[39m,\n", + " templateFormat: \u001b[32m\"f-string\"\u001b[39m,\n", + " partialVariables: {}\n", + " },\n", + " lc_runnable: \u001b[33mtrue\u001b[39m,\n", + " 
name: \u001b[90mundefined\u001b[39m,\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"prompts\"\u001b[39m, \u001b[32m\"prompt\"\u001b[39m ],\n", + " inputVariables: [ \u001b[32m\"input\"\u001b[39m ],\n", + " outputParser: \u001b[90mundefined\u001b[39m,\n", + " partialVariables: {},\n", + " template: \u001b[32m\"{input}\"\u001b[39m,\n", + " templateFormat: \u001b[32m\"f-string\"\u001b[39m,\n", + " validateTemplate: \u001b[33mtrue\u001b[39m\n", + " }\n", + " },\n", + " lc_runnable: \u001b[33mtrue\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"prompts\"\u001b[39m, \u001b[32m\"chat\"\u001b[39m ],\n", + " prompt: PromptTemplate {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " template: \u001b[32m\"{input}\"\u001b[39m,\n", + " inputVariables: [ \u001b[32m\"input\"\u001b[39m ],\n", + " templateFormat: \u001b[32m\"f-string\"\u001b[39m,\n", + " partialVariables: {}\n", + " },\n", + " lc_runnable: \u001b[33mtrue\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"prompts\"\u001b[39m, \u001b[32m\"prompt\"\u001b[39m ],\n", + " inputVariables: [ \u001b[32m\"input\"\u001b[39m ],\n", + " outputParser: \u001b[90mundefined\u001b[39m,\n", + " partialVariables: {},\n", + " template: \u001b[32m\"{input}\"\u001b[39m,\n", + " templateFormat: \u001b[32m\"f-string\"\u001b[39m,\n", + " validateTemplate: \u001b[33mtrue\u001b[39m\n", + " }\n", + " },\n", + " MessagesPlaceholder {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: { optional: \u001b[33mfalse\u001b[39m, variableName: \u001b[32m\"agent_scratchpad\"\u001b[39m },\n", + " lc_runnable: \u001b[33mtrue\u001b[39m,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"prompts\"\u001b[39m, \u001b[32m\"chat\"\u001b[39m ],\n", + " 
variableName: \u001b[32m\"agent_scratchpad\"\u001b[39m,\n", + " optional: \u001b[33mfalse\u001b[39m\n", + " }\n", + "]" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { pull } from \"langchain/hub\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const prompt = await pull(\"hwchase17/openai-tools-agent\");\n", + "prompt.promptMessages" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({ temperature: 0 });" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "import { createOpenAIToolsAgent, AgentExecutor } from \"langchain/agents\";\n", + "\n", + "const agent = await createOpenAIToolsAgent({\n", + " llm,\n", + " tools,\n", + " prompt\n", + "});\n", + "const agentExecutor = new AgentExecutor({\n", + " agent,\n", + " tools,\n", + "});" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can now try it out!" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "const result1 = await agentExecutor.invoke({ input: \"hi im bob\" });" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"Hello Bob! 
How can I assist you today?\"\u001b[39m" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "result1.output" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice that it now does retrieval" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [], + "source": [ + "const result2 = await agentExecutor.invoke({ \n", + " input: `what did the president say about ketanji brown jackson in the most recent state of the union? The current date is ${new Date().toDateString()}`\n", + "});" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"In the most recent State of the Union, the President mentioned Ketanji Brown Jackson as his nominee \"\u001b[39m... 176 more characters" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "result2.output;" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "See a LangSmith trace for the run above [here](https://smith.langchain.com/public/02281666-7124-402e-bd12-722fb58976e5/r)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Notice that the follow up question asks about information previously retrieved, so no need to do another retrieval" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [], + "source": [ + "const result3 = await agentExecutor.invoke({\n", + " input: \"how long ago did the president nominate ketanji brown jackson? 
Use all the tools to find the answer.\"\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"The president nominated Ketanji Brown Jackson 4 days ago.\"\u001b[39m" + ] + }, + "execution_count": 31, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "result3.output;" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "See a LangSmith trace for the run above [here](https://smith.langchain.com/public/2b9ade9d-1f7e-4ae6-bb28-567f96a669f0/r)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For more on how to use agents with retrievers and other tools, head to the [Agents](/docs/modules/agents) section." + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/core_docs/docs/use_cases/question_answering/conversational_retrieval_agents.mdx b/docs/core_docs/docs/use_cases/question_answering/conversational_retrieval_agents.mdx deleted file mode 100644 index 53dbfba1f8b6..000000000000 --- a/docs/core_docs/docs/use_cases/question_answering/conversational_retrieval_agents.mdx +++ /dev/null @@ -1,207 +0,0 @@ -import CodeBlock from "@theme/CodeBlock"; - -# Conversational Retrieval Agents - -This is an agent specifically optimized for doing retrieval when necessary while holding a conversation and being able -to answer questions based on previous dialogue in the conversation. - -To start, we will set up the retriever we want to use, then turn it into a retriever tool. Next, we will use the high-level constructor for this type of agent. 
-Finally, we will walk through how to construct a conversational retrieval agent from components. - -## The Retriever - -To start, we need a retriever to use! The code here is mostly just example code. Feel free to use your own retriever and skip to the next section on creating a retriever tool. - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/openai -``` - -```typescript -import { FaissStore } from "langchain/vectorstores/faiss"; -import { OpenAIEmbeddings } from "@langchain/openai"; -import { TextLoader } from "langchain/document_loaders/fs/text"; -import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"; - -const loader = new TextLoader("state_of_the_union.txt"); -const docs = await loader.load(); -const splitter = new RecursiveCharacterTextSplitter({ - chunkSize: 1000, - chunkOverlap: 0, -}); - -const texts = await splitter.splitDocuments(docs); - -const vectorStore = await FaissStore.fromDocuments( - texts, - new OpenAIEmbeddings() -); - -const retriever = vectorStore.asRetriever(); -``` - -## Retriever Tool - -Now we need to create a tool for our retriever. The main things we need to pass in are a `name` for the retriever as well as a `description`. These will both be used by the language model, so they should be informative. - -```typescript -import { createRetrieverTool } from "langchain/agents/toolkits"; - -const tool = createRetrieverTool(retriever, { - name: "search_state_of_union", - description: - "Searches and returns documents regarding the state-of-the-union.", -}); -``` - -## Agent Constructor - -Here, we will use the high level `create_conversational_retrieval_agent` API to construct the agent. -Notice that beside the list of tools, the only thing we need to pass in is a language model to use. - -Under the hood, this agent is using the OpenAIFunctionsAgent, so we need to use an ChatOpenAI model. 
- -```typescript -import { createConversationalRetrievalAgent } from "langchain/agents/toolkits"; -import { ChatOpenAI } from "@langchain/openai"; - -const model = new ChatOpenAI({ - temperature: 0, -}); - -const executor = await createConversationalRetrievalAgent(model, [tool], { - verbose: true, -}); -``` - -We can now try it out! - -```typescript -const result = await executor.invoke({ - input: "Hi, I'm Bob!", -}); - -console.log(result); - -/* - { - output: 'Hello Bob! How can I assist you today?', - intermediateSteps: [] - } -*/ - -const result2 = await executor.invoke({ - input: "What's my name?", -}); - -console.log(result2); - -/* - { output: 'Your name is Bob.', intermediateSteps: [] } -*/ - -const result3 = await executor.invoke({ - input: - "What did the president say about Ketanji Brown Jackson in the most recent state of the union?", -}); - -console.log(result3); - -/* - { - output: "In the most recent state of the union, President Biden mentioned Ketanji Brown Jackson. He nominated her as a Circuit Court of Appeals judge and described her as one of the nation's top legal minds who will continue Justice Breyer's legacy of excellence. He mentioned that she has received a broad range of support, including from the Fraternal Order of Police and former judges appointed by Democrats and Republicans.", - intermediateSteps: [ - {...} - ] - } -*/ - -const result4 = await executor.invoke({ - input: "How long ago did he nominate her?", -}); - -console.log(result4); - -/* - { - output: 'President Biden nominated Ketanji Brown Jackson four days before the most recent state of the union address.', - intermediateSteps: [] - } -*/ -``` - -Note that for the final call, the agent used previously retrieved information to answer the query and did not need to call the tool again! 
- -Here's a trace showing how the agent fetches documents to answer the question with the retrieval tool: - -https://smith.langchain.com/public/1e2b1887-ca44-4210-913b-a69c1b8a8e7e/r - -## Creating from components - -What actually is going on underneath the hood? Let's take a look so we can understand how to modify things going forward. - -### Memory - -In this example, we want the agent to remember not only previous conversations, but also previous intermediate steps. -For that, we can use `OpenAIAgentTokenBufferMemory`. Note that if you want to change whether the agent remembers intermediate steps, -how the long the retained buffer is, or anything like that you should change this part. - -```typescript -import { OpenAIAgentTokenBufferMemory } from "langchain/agents/toolkits"; - -const memory = new OpenAIAgentTokenBufferMemory({ - llm: model, - memoryKey: "chat_history", - outputKey: "output", -}); -``` - -You should make sure `memoryKey` is set to `"chat_history"` and `outputKey` is set to `"output"` for the OpenAI functions agent. -This memory also has `returnMessages` set to `true` by default. - -You can also load messages from prior conversations into this memory by initializing it with a pre-loaded chat history: - -```typescript -import { ChatOpenAI } from "@langchain/openai"; -import { OpenAIAgentTokenBufferMemory } from "langchain/agents/toolkits"; -import { HumanMessage, AIMessage } from "langchain/schema"; -import { ChatMessageHistory } from "langchain/memory"; - -const previousMessages = [ - new HumanMessage("My name is Bob"), - new AIMessage("Nice to meet you, Bob!"), -]; - -const chatHistory = new ChatMessageHistory(previousMessages); - -const memory = new OpenAIAgentTokenBufferMemory({ - llm: new ChatOpenAI({}), - memoryKey: "chat_history", - outputKey: "output", - chatHistory, -}); -``` - -### Agent executor - -We can recreate the agent executor directly with the `initializeAgentExecutorWithOptions` method. 
-This allows us to customize the agent's system message by passing in a `prefix` into `agentArgs`. -Importantly, we must pass in `return_intermediate_steps: true` since we are recording that with our memory object. - -```typescript -import { initializeAgentExecutorWithOptions } from "langchain/agents"; - -const executor = await initializeAgentExecutorWithOptions(tools, llm, { - agentType: "openai-functions", - memory, - returnIntermediateSteps: true, - agentArgs: { - prefix: - prefix ?? - `Do your best to answer the questions. Feel free to use any tools available to look up relevant information, only if necessary.`, - }, -}); -``` diff --git a/docs/core_docs/docs/use_cases/question_answering/index.mdx b/docs/core_docs/docs/use_cases/question_answering/index.mdx index 37841df26d20..97008d3db6d6 100644 --- a/docs/core_docs/docs/use_cases/question_answering/index.mdx +++ b/docs/core_docs/docs/use_cases/question_answering/index.mdx @@ -1,351 +1,57 @@ ---- -hide_table_of_contents: true ---- +# Q&A with RAG -# QA and Chat over Documents +## Overview -Chat and Question-Answering (QA) over `data` are popular LLM use-cases. +One of the most powerful applications enabled by LLMs is sophisticated question-answering (Q&A) chatbots. +These are applications that can answer questions about specific source information. +These applications use a technique known as Retrieval Augmented Generation, or RAG. -`data` can include many things, including: +### What is RAG? -- `Unstructured data` (e.g., PDFs) -- `Structured data` (e.g., SQL) -- `Code` (e.g., Python) +RAG is a technique for augmenting LLM knowledge with additional data. -Below we will review Chat and QA on `Unstructured data`. +LLMs can reason about wide-ranging topics, but their knowledge is limited to the public data up to a specific point in time that they were trained on. 
+If you want to build AI applications that can reason about private data or data introduced after a model's cutoff date, you need to augment the knowledge of the model with the specific information it needs. +The process of bringing the appropriate information and inserting it into the model prompt is known as Retrieval Augmented Generation (RAG). -![intro.png](/img/qa_intro.png) +LangChain has a number of components designed to help build Q&A applications, and RAG applications more generally. -`Unstructured data` can be loaded from many sources. +Note: Here we focus on Q&A for unstructured data. Two RAG use cases which we cover elsewhere are: -Check out the [document loader integrations here](/docs/integrations/document_loaders/) to browse the set of supported loaders. +- [Q&A over SQL data](/docs/use_cases/sql) +- [Q&A over code](/docs/use_cases/code_understanding) (e.g., TypeScript) -Each loader returns data as a LangChain `Document`. +## RAG Architecture -`Documents` are turned into a Chat or QA app following the general steps below: +A typical RAG application has two main components: -- `Splitting`: [Text splitters](/docs/modules/data_connection/document_transformers/) break `Documents` into splits of specified size -- `Storage`: Storage (e.g., often a [vectorstore](/docs/modules/data_connection/vectorstores/)) will house [and often embed](https://www.pinecone.io/learn/vector-embeddings/) the splits -- `Retrieval`: The app retrieves splits from storage (e.g., often [with similar embeddings](https://www.pinecone.io/learn/k-nearest-neighbor/) to the input question) -- `Output`: An [LLM](/docs/modules/model_io/llms/) produces an answer using a prompt that includes the question and the retrieved splits +**Indexing**: a pipeline for ingesting data from a source and indexing it. _This usually happens offline_. 
-![flow.jpeg](/img/qa_flow.jpeg) +**Retrieval and generation**: the actual RAG chain, which takes the user query at run time and retrieves the relevant data from the index, then passes that to the model. -## Quickstart +The most common full sequence from raw data to answer looks like: -Let's load this [blog post](https://lilianweng.github.io/posts/2023-06-23-agent/) on agents as an example `Document`. +**Indexing** -We'll have a QA app in a few lines of code. +1. **Load**: First we need to load our data. This is done with [DocumentLoaders](/docs/modules/data_connection/document_loaders/). +2. **Split**: [Text splitters](/docs/modules/data_connection/document_transformers/) break large `Documents` into smaller chunks. This is useful both for indexing data and for passing it in to a model, since large chunks are harder to search over and won't fit in a model's finite context window. +3. **Store**: We need somewhere to store and index our splits, so that they can later be searched over. This is often done using a [VectorStore](/docs/modules/data_connection/vectorstores/) and [Embeddings](/docs/modules/data_connection/text_embedding/) model. -First, set environment variables and install packages required for the guide: +![Indexing](/img/rag_indexing.png) -```shell -> yarn add cheerio -# Or load env vars in your preferred way: -> export OPENAI_API_KEY="..." -``` +**Retrieval and generation** -## 1. Loading, Splitting, Storage +1. **Retrieve**: Given a user input, relevant splits are retrieved from storage using a [Retriever](/docs/modules/data_connection/retrievers/). +2. **Generate**: A [ChatModel](/docs/modules/model_io/chat) / [LLM](/docs/modules/model_io/llms) produces an answer using a prompt that includes the question and the retrieved data -### 1.1 Getting started +![Retrieval generation](/img/rag_retrieval_generation.png) -Specify a `Document` loader. 
+## Table of contents -```typescript -// Document loader -import { CheerioWebBaseLoader } from "langchain/document_loaders/web/cheerio"; - -const loader = new CheerioWebBaseLoader( - "https://lilianweng.github.io/posts/2023-06-23-agent/" -); -const data = await loader.load(); -``` - -Split the `Document` into chunks for embedding and vector storage. - -```typescript -import { RecursiveCharacterTextSplitter } from "langchain/text_splitter"; - -const textSplitter = new RecursiveCharacterTextSplitter({ - chunkSize: 500, - chunkOverlap: 0, -}); - -const splitDocs = await textSplitter.splitDocuments(data); -``` - -Embed and store the splits in a vector database (for demo purposes we use an unoptimized, in-memory example but you can [browse integrations here](/docs/integrations/vectorstores/)): - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/openai -``` - -```typescript -import { OpenAIEmbeddings } from "@langchain/openai"; -import { MemoryVectorStore } from "langchain/vectorstores/memory"; - -const embeddings = new OpenAIEmbeddings(); - -const vectorStore = await MemoryVectorStore.fromDocuments( - splitDocs, - embeddings -); -``` - -Here are the three pieces together: - -![lc.png](/img/qa_data_load.png) - -### 1.2 Going Deeper - -#### 1.2.1 Integrations - -`Document Loaders` - -- Browse document loader integrations [here](/docs/integrations/document_loaders/). - -- See further documentation on loaders [here](/docs/modules/data_connection/document_loaders/). - -`Document Transformers` - -- All can ingest loaded `Documents` and process them (e.g., split). - -- See further documentation on transformers [here](/docs/integrations/document_transformers/). - -`Vectorstores` - -- Browse vectorstore integrations [here](/docs/integrations/vectorstores/). - -- See further documentation on vectorstores [here](/docs/modules/data_connection/vectorstores/). - -## 2. 
Retrieval - -### 2.1 Getting started - -Retrieve [relevant splits](https://www.pinecone.io/learn/what-is-similarity-search/) for any question using `similarity_search`. - -```typescript -const relevantDocs = await vectorStore.similaritySearch( - "What is task decomposition?" -); - -console.log(relevantDocs.length); - -// 4 -``` - -### 2.2 Going Deeper - -#### 2.2.1 Retrieval - -Vectorstores are commonly used for retrieval. - -But, they are not the only option. - -For example, SVMs (see thread [here](https://twitter.com/karpathy/status/1647025230546886658?s=20)) can also be used. - -LangChain [has many retrievers and retrieval methods](/docs/modules/data_connection/retrievers/) including, but not limited to, vectorstores. - -All retrievers implement some common methods, such as `getRelevantDocuments()`. - -## 3. QA - -### 3.1 Getting started - -Distill the retrieved documents into an answer using an LLM (e.g., `gpt-3.5-turbo`) with `RetrievalQA` chain. - -```typescript -import { RetrievalQAChain } from "langchain/chains"; -import { ChatOpenAI } from "@langchain/openai"; - -const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo" }); -const chain = RetrievalQAChain.fromLLM(model, vectorStore.asRetriever()); - -const response = await chain.call({ - query: "What is task decomposition?", -}); -console.log(response); - -/* - { - text: 'Task decomposition refers to the process of breaking down a larger task into smaller, more manageable subgoals. By decomposing a task, it becomes easier for an agent or system to handle complex tasks efficiently. Task decomposition can be done through various methods such as using prompting or task-specific instructions, or through human inputs. It helps in planning and organizing the steps required to complete a task effectively.' - } -*/ -``` - -### 3.2 Going Deeper - -#### 3.2.1 Integrations - -`LLMs` - -- Browse LLM integrations and further documentation [here](/docs/integrations/llms/). 
- -#### 3.2.2 Customizing the prompt - -The prompt in `RetrievalQA` chain can be customized as follows. - -```typescript -import { RetrievalQAChain } from "langchain/chains"; -import { ChatOpenAI } from "@langchain/openai"; -import { PromptTemplate } from "langchain/prompts"; - -const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo" }); - -const template = `Use the following pieces of context to answer the question at the end. -If you don't know the answer, just say that you don't know, don't try to make up an answer. -Use three sentences maximum and keep the answer as concise as possible. -Always say "thanks for asking!" at the end of the answer. -{context} -Question: {question} -Helpful Answer:`; - -const chain = RetrievalQAChain.fromLLM(model, vectorStore.asRetriever(), { - prompt: PromptTemplate.fromTemplate(template), -}); - -const response = await chain.call({ - query: "What is task decomposition?", -}); - -console.log(response); - -/* - { - text: 'Task decomposition is the process of breaking down a large task into smaller, more manageable subgoals. This allows for efficient handling of complex tasks and aids in planning and organizing the steps needed to achieve the overall goal. Thanks for asking!' - } -*/ -``` - -#### 3.2.3 Returning source documents - -The full set of retrieved documents used for answer distillation can be returned using `return_source_documents=True`. 
- -```typescript -import { RetrievalQAChain } from "langchain/chains"; -import { ChatOpenAI } from "@langchain/openai"; - -const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo" }); - -const chain = RetrievalQAChain.fromLLM(model, vectorStore.asRetriever(), { - returnSourceDocuments: true, -}); - -const response = await chain.call({ - query: "What is task decomposition?", -}); - -console.log(response.sourceDocuments[0]); - -/* -Document { - pageContent: 'Task decomposition can be done (1) by LLM with simple prompting like "Steps for XYZ.\\n1.", "What are the subgoals for achieving XYZ?", (2) by using task-specific instructions; e.g. "Write a story outline." for writing a novel, or (3) with human inputs.', - metadata: [Object] -} -*/ -``` - -#### 3.2.4 Customizing retrieved docs in the LLM prompt - -Retrieved documents can be fed to an LLM for answer distillation in a few different ways. - -`stuff`, `refine`, and `map-reduce` chains for passing documents to an LLM prompt are well summarized [here](/docs/modules/chains/document/). - -`stuff` is commonly used because it simply "stuffs" all retrieved documents into the prompt. - -The [loadQAChain](/docs/modules/chains/document/) methods are easy ways to pass documents to an LLM using these various approaches. - -```typescript -import { loadQAStuffChain } from "langchain/chains"; - -const stuffChain = loadQAStuffChain(model); - -const stuffResult = await stuffChain.call({ - input_documents: relevantDocs, - question: "What is task decomposition?", -}); - -console.log(stuffResult); -/* -{ - text: 'Task decomposition is the process of breaking down a large task into smaller, more manageable subgoals or steps. This allows for efficient handling of complex tasks by focusing on one subgoal at a time. Task decomposition can be done through various methods such as using simple prompting, task-specific instructions, or human inputs.' -} -*/ -``` - -## 4. 
Chat - -### 4.1 Getting started - -To keep chat history, we use a variant of the previous chain called a `ConversationalRetrievalQAChain`. -First, specify a `Memory buffer` to track the conversation inputs / outputs. - -```typescript -import { ConversationalRetrievalQAChain } from "langchain/chains"; -import { BufferMemory } from "langchain/memory"; -import { ChatOpenAI } from "@langchain/openai"; - -const memory = new BufferMemory({ - memoryKey: "chat_history", - returnMessages: true, -}); -``` - -Next, we initialize and call the chain: - -```typescript -const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo" }); -const chain = ConversationalRetrievalQAChain.fromLLM( - model, - vectorStore.asRetriever(), - { - memory, - } -); - -const result = await chain.call({ - question: "What are some of the main ideas in self-reflection?", -}); -console.log(result); - -/* -{ - text: 'Some main ideas in self-reflection include:\n' + - '\n' + - '1. Iterative Improvement: Self-reflection allows autonomous agents to improve by continuously refining past action decisions and correcting mistakes.\n' + - '\n' + - '2. Trial and Error: Self-reflection plays a crucial role in real-world tasks where trial and error are inevitable. It helps agents learn from failed trajectories and make adjustments for future actions.\n' + - '\n' + - '3. Constructive Criticism: Agents engage in constructive self-criticism of their big-picture behavior to identify areas for improvement.\n' + - '\n' + - '4. Decision and Strategy Refinement: Reflection on past decisions and strategies enables agents to refine their approach and make more informed choices.\n' + - '\n' + - '5. Efficiency and Optimization: Self-reflection encourages agents to be smart and efficient in their actions, aiming to complete tasks in the least number of steps.\n' + - '\n' + - 'These ideas highlight the importance of self-reflection in enhancing performance and guiding future actions.' 
-} -*/ -``` - -The `Memory buffer` has context to resolve `"it"` ("self-reflection") in the below question. - -```typescript -const followupResult = await chain.call({ - question: "How does the Reflexion paper handle it?", -}); -console.log(followupResult); - -/* -{ - text: "The Reflexion paper introduces a framework that equips agents with dynamic memory and self-reflection capabilities to improve their reasoning skills. The approach involves showing the agent two-shot examples, where each example consists of a failed trajectory and an ideal reflection on how to guide future changes in the agent's plan. These reflections are then added to the agent's working memory as context for querying a language model. The agent uses this self-reflection information to make decisions on whether to start a new trial or continue with the current plan." -} -*/ -``` - -### 4.2 Going deeper - -The [documentation](/docs/modules/chains/popular/chat_vector_db) on `ConversationalRetrievalQAChain` offers a few extensions, such as streaming and source documents. +- [Quickstart](/docs/use_cases/question_answering/quickstart): We recommend starting here. Many of the following guides assume you fully understand the architecture shown in the Quickstart. +- [Returning sources](/docs/use_cases/question_answering/sources): How to return the source documents used in a particular generation. +- [Streaming](/docs/use_cases/question_answering/streaming): How to stream final answers as well as intermediate steps. +- [Adding chat history](/docs/use_cases/question_answering/chat_history): How to add chat history to a Q&A app. +- [Per-user retrieval](/docs/use_cases/question_answering/): How to do retrieval when each user has their own private data. +- [Using agents](/docs/use_cases/question_answering/conversational_retrieval_agents): How to use agents for Q&A. +- [Using local models](/docs/use_cases/question_answering/local_retrieval_qa): How to use local models for Q&A.
diff --git a/docs/core_docs/docs/use_cases/question_answering/local_retrieval_qa.ipynb b/docs/core_docs/docs/use_cases/question_answering/local_retrieval_qa.ipynb new file mode 100644 index 000000000000..aa19b161978d --- /dev/null +++ b/docs/core_docs/docs/use_cases/question_answering/local_retrieval_qa.ipynb @@ -0,0 +1,424 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Using local models\n", + "\n", + "The popularity of projects like [PrivateGPT](https://github.com/imartinez/privateGPT), [llama.cpp](https://github.com/ggerganov/llama.cpp), and [Ollama](https://github.com/ollama/ollama) underscore the importance of running LLMs locally.\n", + "\n", + "LangChain has [integrations](/docs/integrations/platforms) with many open-source LLMs that can be run locally.\n", + "\n", + "For example, here we show how to run `OllamaEmbeddings` or `LLaMA2` locally (e.g., on your laptop) using local embeddings and a local LLM.\n", + "\n", + "## Document Loading \n", + "\n", + "First, install packages needed for local embeddings and vector storage." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup\n", + "### Dependencies\n", + "\n", + "We’ll use the following packages:\n", + "\n", + "```bash\n", + "npm install --save langchain @langchain/community cheerio\n", + "```\n", + "\n", + "### LangSmith\n", + "\n", + "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://smith.langchain.com/).\n", + "\n", + "Note that LangSmith is not needed, but it is helpful. 
If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\n", + "\n", + "\n", + "```bash\n", + "export LANGCHAIN_TRACING_V2=true\n", + "export LANGCHAIN_API_KEY=YOUR_KEY\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Initial setup\n", + "\n", + "Load and split an example document.\n", + "\n", + "We'll use a blog post on agents as an example." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import \"cheerio\";\n", + "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", + "import { CheerioWebBaseLoader } from \"langchain/document_loaders/web/cheerio\";" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "146\n" + ] + } + ], + "source": [ + "const loader = new CheerioWebBaseLoader(\n", + " \"https://lilianweng.github.io/posts/2023-06-23-agent/\"\n", + ");\n", + "const docs = await loader.load();\n", + "\n", + "const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 500, chunkOverlap: 0 });\n", + "const allSplits = await textSplitter.splitDocuments(docs);\n", + "console.log(allSplits.length)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, we'll use `OllamaEmbeddings` for our local embeddings.\n", + "Follow [these instructions](https://github.com/ollama/ollama) to set up and run a local Ollama instance." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import { OllamaEmbeddings } from \"@langchain/community/embeddings/ollama\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\";\n", + "\n", + "const embeddings = new OllamaEmbeddings();\n", + "const vectorStore = await MemoryVectorStore.fromDocuments(allSplits, embeddings);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Test similarity search is working with our local embeddings." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "4\n" + ] + } + ], + "source": [ + "const question = \"What are the approaches to Task Decomposition?\";\n", + "const docs = await vectorStore.similaritySearch(question);\n", + "console.log(docs.length)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Model \n", + "\n", + "### LLaMA2\n", + "\n", + "For local LLMs we'll use also use `ollama`." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOllama } from \"@langchain/community/chat_models/ollama\";\n", + "\n", + "const ollamaLlm = new ChatOllama({\n", + " baseUrl: \"http://localhost:11434\", // Default value\n", + " model: \"llama2\", // Default value\n", + "});\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "[The stage is set for a fierce rap battle between two of the funniest men on television. 
Stephen Colbert and John Oliver are standing face to face, each with their own microphone and confident smirk on their face.]\n", + "\n", + "Stephen Colbert:\n", + "Yo, John Oliver, I heard you've been talking smack\n", + "About my show and my satire, saying it's all fake\n", + "But let me tell you something, brother, I'm the real deal\n", + "I've been making fun of politicians for years, with no conceal\n", + "\n", + "John Oliver:\n", + "Oh, Stephen, you think you're so clever and smart\n", + "But your jokes are stale and your delivery's a work of art\n", + "You're just a pale imitation of the real deal, Jon Stewart\n", + "I'm the one who's really making waves, while you're just a little bird\n", + "\n", + "Stephen Colbert:\n", + "Well, John, I may not be as loud as you, but I'm smarter\n", + "My satire is more subtle, and it goes right over their heads\n", + "I'm the one who's been exposing the truth for years\n", + "While you're just a British interloper, trying to steal the cheers\n", + "\n", + "John Oliver:\n", + "Oh, Stephen, you may have your fans, but I've got the brains\n", + "My show is more than just slapstick and silly jokes, it's got depth and gains\n", + "I'm the one who's really making a difference, while you're just a clown\n", + "My satire is more than just a joke, it's a call to action, and I've got the crown\n", + "\n", + "[The crowd cheers and chants as the two comedians continue their rap battle.]\n", + "\n", + "Stephen Colbert:\n", + "You may have your fans, John, but I'm the king of satire\n", + "I've been making fun of politicians for years, and I'm still standing tall\n", + "My jokes are clever and smart, while yours are just plain dumb\n", + "I'm the one who's really in control, and you're just a pretender to the throne.\n", + "\n", + "John Oliver:\n", + "Oh, Stephen, you may have your moment in the sun\n", + "But I'm the one who's really shining bright, and my star is just beginning to rise\n", + "My satire is more than just a joke, it's 
a call to action, and I've got the power\n", + "I'm the one who's really making a difference, and you're just a fleeting flower.\n", + "\n", + "[The crowd continues to cheer and chant as the two comedians continue their rap battle.]\n" + ] + } + ], + "source": [ + "const response = await ollamaLlm.invoke(\"Simulate a rap battle between Stephen Colbert and John Oliver\");\n", + "console.log(response.content);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "See the LangSmith trace [here](https://smith.langchain.com/public/31c178b5-4bea-4105-88c3-7ec95325c817/r)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using in a chain\n", + "\n", + "We can create a summarization chain with either model by passing in the retrieved docs and a simple prompt.\n", + "\n", + "It formats the prompt template using the input key values provided and passes the formatted string to `LLama-V2`, or another specified LLM." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "import { RunnableSequence } from \"@langchain/core/runnables\";\n", + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "import { PromptTemplate } from \"@langchain/core/prompts\";\n", + "import { createStuffDocumentsChain } from \"langchain/chains/combine_documents\";\n", + "\n", + "const prompt = PromptTemplate.fromTemplate(\"Summarize the main themes in these retrieved docs: {context}\");\n", + "\n", + "const chain = await createStuffDocumentsChain({\n", + " llm: ollamaLlm,\n", + " outputParser: new StringOutputParser(),\n", + " prompt,\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"The main themes retrieved from the provided documents are:\\n\"\u001b[39m +\n", + " \u001b[32m\"\\n\"\u001b[39m +\n", + " \u001b[32m\"1. Sensory Memory: The ability to retain\"\u001b[39m... 
1117 more characters" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const question = \"What are the approaches to Task Decomposition?\";\n", + "const docs = await vectorStore.similaritySearch(question);\n", + "await chain.invoke({\n", + " context: docs,\n", + "});" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "See the LangSmith trace [here](https://smith.langchain.com/public/47cf6c2a-3d86-4f2b-9a51-ee4663b19152/r)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Q&A \n", + "\n", + "We can also use the LangChain Prompt Hub to store and fetch prompts that are model-specific.\n", + "\n", + "Let's try with a default RAG prompt, [here](https://smith.langchain.com/hub/rlm/rag-prompt)." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "import { pull } from \"langchain/hub\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const ragPrompt = await pull(\"rlm/rag-prompt\");\n", + "\n", + "const chain = await createStuffDocumentsChain({\n", + " llm: ollamaLlm,\n", + " outputParser: new StringOutputParser(),\n", + " prompt: ragPrompt,\n", + "});" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"Task decomposition is a crucial step in breaking down complex problems into manageable parts for eff\"\u001b[39m... 
1095 more characters" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await chain.invoke({ context: docs, question });" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "See the LangSmith trace [here](https://smith.langchain.com/public/dd3a189b-53a1-4f31-9766-244cd04ad1f7/r)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Q&A with retrieval\n", + "\n", + "Instead of manually passing in docs, we can automatically retrieve them from our vector store based on the user question.\n", + "\n", + "This will use a QA default prompt and will retrieve from the vectorDB." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"Based on the context provided, I understand that you are asking me to answer a question related to m\"\u001b[39m... 948 more characters" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", + "import { formatDocumentsAsString } from \"langchain/util/document\";\n", + "\n", + "const retriever = vectorStore.asRetriever();\n", + "\n", + "const qaChain = RunnableSequence.from([\n", + " {\n", + " context: (input: { question: string }, callbacks) => {\n", + " const retrieverAndFormatter = retriever.pipe(formatDocumentsAsString);\n", + " return retrieverAndFormatter.invoke(input.question, callbacks);\n", + " },\n", + " question: new RunnablePassthrough(),\n", + " },\n", + " ragPrompt,\n", + " ollamaLlm,\n", + " new StringOutputParser(),\n", + "]);\n", + "\n", + "await qaChain.invoke({ question });" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "See the LangSmith trace [here](https://smith.langchain.com/public/440e65ee-0301-42cf-afc9-f09cfb52cf64/r)" + ] + } + ], + "metadata": { + 
"kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/core_docs/docs/use_cases/question_answering/local_retrieval_qa.mdx b/docs/core_docs/docs/use_cases/question_answering/local_retrieval_qa.mdx deleted file mode 100644 index 1abdb423d592..000000000000 --- a/docs/core_docs/docs/use_cases/question_answering/local_retrieval_qa.mdx +++ /dev/null @@ -1,68 +0,0 @@ -# Use local LLMs - -The popularity of projects like [PrivateGPT](https://github.com/imartinez/privateGPT), [llama.cpp](https://github.com/ggerganov/llama.cpp), and [GPT4All](https://github.com/nomic-ai/gpt4all) underscore the importance of running LLMs locally. - -LangChain integrates with [Ollama](https://ollama.ai/) to run several open source LLMs locally with GPU support. - -For example, here we show how to run `Llama 2` locally (e.g., on your laptop) using local embeddings, a local vector store, and a local LLM. -You can check out other open-source models supported by Ollama [here](https://github.com/jmorganca/ollama#model-library). - -This tutorial is designed for Node.js running on Mac OSX with at least 16 GB of RAM. - -## Setup - -First, install packages needed for local embeddings and vector storage. For this demo, we'll use Llama 2 through Ollama as our LLM, -[Transformers.js](/docs/integrations/text_embedding/transformers/) for embeddings, -and [HNWSLib](/docs/integrations/vectorstores/hnswlib) as a vector store for retrieval. -We'll also install `cheerio` for scraping, though you can use any loader. 
- -```bash npm2yarn -npm install @xenova/transformers -npm install hnswlib-node -npm install cheerio -``` - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/community -``` - -You'll also need to set up Ollama and run a local instance using [these instructions](https://github.com/jmorganca/ollama#ollama). - -## Document loading - -Next, we need to load some documents. We'll use a blog post on agents as an example. - -import CodeBlock from "@theme/CodeBlock"; -import LoadDocuments from "@examples/use_cases/local_retrieval_qa/load_documents.ts"; - -{LoadDocuments} - -## Composable chain - -We can use a chain for retrieval by passing in the retrieved docs and a prompt. - -It formats the prompt template using the input key values provided and passes the formatted string to `Llama 2`, or another specified LLM. - -In this case, the documents retrieved by the vector-store powered `retriever` are converted to strings and passed into the `{context}` variable in the prompt: - -import ChainExample from "@examples/use_cases/local_retrieval_qa/chain.ts"; - -{ChainExample} - -## RetrievalQA - -For an even simpler flow, use the preconfigured `RetrievalQAChain`. - -This will use a default QA prompt and will retrieve from the vector store. - -You can still pass in a custom prompt if desired. - -`type: "stuff"` (see [here](/docs/modules/chains/document/stuff)) means that all the docs will be added (stuffed) into a prompt. 
- -import QAChainExample from "@examples/use_cases/local_retrieval_qa/qa_chain.ts"; - -{QAChainExample} diff --git a/docs/core_docs/docs/use_cases/question_answering/quickstart.ipynb b/docs/core_docs/docs/use_cases/question_answering/quickstart.ipynb new file mode 100644 index 000000000000..c86c49f85c77 --- /dev/null +++ b/docs/core_docs/docs/use_cases/question_answering/quickstart.ipynb @@ -0,0 +1,849 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "sidebar_position: 0\n", + "---" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Quickstart\n", + "\n", + "LangChain has a number of components designed to help build question-answering applications, and RAG applications more generally. To familiarize ourselves with these, we’ll build a simple Q&A application over a text data source. Along the way we’ll go over a typical Q&A architecture, discuss the relevant LangChain components, and highlight additional resources for more advanced Q&A techniques. We’ll also see how LangSmith can help us trace and understand our application. LangSmith will become increasingly helpful as our application grows in complexity." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Architecture\n", + "\n", + "We’ll create a typical RAG application as outlined in the [Q&A introduction](/docs/use_cases/question_answering), which has two main components:\n", + "\n", + "**Indexing**: a pipeline for ingesting data from a source and indexing it. This usually happens offline.\n", + "\n", + "**Retrieval and generation**: the actual RAG chain, which takes the user query at run time and retrieves the relevant data from the index, then passes that to the model.\n", + "\n", + "The full sequence from raw data to answer will look like:\n", + "\n", + "**Indexing**\n", + "1. **Load**: First we need to load our data. 
This is done with [DocumentLoaders](/docs/modules/data_connection/document_loaders/).\n", + "2. **Split**: [Text splitters](/docs/modules/data_connection/document_transformers/) break large `Documents` into smaller chunks. This is useful both for indexing data and for passing it in to a model, since large chunks are harder to search over and won't fit in a model's finite context window.\n", + "3. **Store**: We need somewhere to store and index our splits, so that they can later be searched over. This is often done using a [VectorStore](/docs/modules/data_connection/vectorstores/) and [Embeddings](/docs/modules/data_connection/text_embedding/) model.\n", + "\n", + "**Retrieval and generation**\n", + "1. **Retrieve**: Given a user input, relevant splits are retrieved from storage using a [Retriever](/docs/modules/data_connection/retrievers/).\n", + "2. **Generate**: A [ChatModel](/docs/modules/model_io/chat) / [LLM](/docs/modules/model_io/llms) produces an answer using a prompt that includes the question and the retrieved data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup\n", + "### Dependencies\n", + "\n", + "We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/modules/model_io/chat) or [LLM](/docs/modules/model_io/llms), [Embeddings](/docs/modules/data_connection/text_embedding/), and [VectorStore](/docs/modules/data_connection/vectorstores/) or [Retriever](/docs/modules/data_connection/retrievers/).\n", + "\n", + "We’ll use the following packages:\n", + "\n", + "```bash\n", + "npm install --save langchain @langchain/openai cheerio\n", + "```\n", + "\n", + "We need to set environment variable `OPENAI_API_KEY`:\n", + "\n", + "```bash\n", + "export OPENAI_API_KEY=YOUR_KEY\n", + "```\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### LangSmith\n", + "\n", + "Many of the applications you build 
with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://smith.langchain.com/).\n", + "\n", + "Note that LangSmith is not needed, but it is helpful. If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\n", + "\n", + "\n", + "```bash\n", + "export LANGCHAIN_TRACING_V2=true\n", + "export LANGCHAIN_API_KEY=YOUR_KEY\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Preview\n", + "\n", + "In this guide we’ll build a QA app over the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng, which allows us to ask questions about the contents of the post.\n", + "\n", + "We can create a simple indexing pipeline and RAG chain to do this in only a few lines of code:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import \"cheerio\";\n", + "import { CheerioWebBaseLoader } from \"langchain/document_loaders/web/cheerio\";\n", + "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\"\n", + "import { OpenAIEmbeddings, ChatOpenAI } from \"@langchain/openai\";\n", + "import { pull } from \"langchain/hub\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { StringOutputParser } from \"@langchain/core/output_parsers\";" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import { createStuffDocumentsChain } from \"langchain/chains/combine_documents\";\n", + "\n", + "const loader = new CheerioWebBaseLoader(\n", + " 
\"https://lilianweng.github.io/posts/2023-06-23-agent/\"\n", + ");\n", + "\n", + "const docs = await loader.load();\n", + "\n", + "const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n", + "const splits = await textSplitter.splitDocuments(docs);\n", + "const vectorStore = await MemoryVectorStore.fromDocuments(splits, new OpenAIEmbeddings());\n", + "\n", + "// Retrieve and generate using the relevant snippets of the blog.\n", + "const retriever = vectorStore.asRetriever();\n", + "const prompt = await pull(\"rlm/rag-prompt\");\n", + "const llm = new ChatOpenAI({ modelName: \"gpt-3.5-turbo\", temperature: 0 });\n", + "\n", + "const ragChain = await createStuffDocumentsChain({\n", + " llm,\n", + " prompt,\n", + " outputParser: new StringOutputParser(),\n", + "})\n", + "\n", + "const retrievedDocs = await retriever.getRelevantDocuments(\"what is task decomposition\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. I\"\u001b[39m... 259 more characters" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await ragChain.invoke({\n", + " question: \"What is task decomposition?\",\n", + " context: retrievedDocs,\n", + "})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Checkout [this LangSmith trace](https://smith.langchain.com/public/54cffec3-5c26-477d-b56d-ebb66a254c8e/r) of the chain above." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can also construct the RAG chain above in a more declarative way using a `RunnableSequence`. `createStuffDocumentsChain` is basically a wrapper around `RunnableSequence`, so for more complex chains and customizability, you can use `RunnableSequence` directly." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "import { formatDocumentsAsString } from \"langchain/util/document\";\n", + "import { RunnableSequence, RunnablePassthrough } from \"@langchain/core/runnables\";\n", + "\n", + "const declarativeRagChain = RunnableSequence.from([\n", + " {\n", + " context: retriever.pipe(formatDocumentsAsString),\n", + " question: new RunnablePassthrough(),\n", + " },\n", + " prompt,\n", + " llm,\n", + " new StringOutputParser()\n", + "]);" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. I\"\u001b[39m... 208 more characters" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await declarativeRagChain.invoke(\"What is task decomposition?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "LangSmith [trace](https://smith.langchain.com/public/c48e186c-c9da-4694-adf2-3a7c94362ec2/r)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Detailed walkthrough\n", + "\n", + "Let’s go through the above code step-by-step to really understand what’s going on.\n", + "\n", + "## 1. Indexing: Load\n", + "We need to first load the blog post contents. We can use [DocumentLoaders](/docs/modules/data_connection/document_loaders/) for this, which are objects that load in data from a source and return a list of [Documents](https://api.js.langchain.com/classes/langchain_core_documents.Document.html). 
A Document is an object with some pageContent (`string`) and metadata (`Record`).\n", + "\n", + "In this case we’ll use the [CheerioWebBaseLoader](https://api.js.langchain.com/classes/langchain_document_loaders_web_cheerio.CheerioWebBaseLoader.html), which uses cheerio to load HTML from web URLs and parse it to text. We can pass custom selectors to the constructor to only parse specific elements:" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "22054\n" + ] + } + ], + "source": [ + "const pTagSelector = \"p\";\n", + "const loader = new CheerioWebBaseLoader(\n", + " \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n", + " {\n", + " selector: pTagSelector\n", + " }\n", + ");\n", + "\n", + "const docs = await loader.load();\n", + "console.log(docs[0].pageContent.length)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Building agents with LLM (large language model) as its core controller is a cool concept. Several proof-of-concepts demos, such as AutoGPT, GPT-Engineer and BabyAGI, serve as inspiring examples. The potentiality of LLM extends beyond generating well-written copies, stories, essays and programs; it can be framed as a powerful general problem solver.In a LLM-powered autonomous agent system, LLM functions as the agent’s brain, complemented by several key components:A complicated task usually involves many steps. An agent needs to know what they are and plan ahead.Chain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. 
CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.Another quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.Self-reflection is a vital aspect that allows autonomous agents to improve iteratively by refining past action decisions and correcting previous mistakes. It plays a crucial role in real-world tasks where trial and error are inevitable.ReAct (Yao et al. 2023) integrates reasoning and acting within LLM by extending the action space to be a combination of task-specific discrete actions and the language space. The former enables LLM to interact with the environment (e.g. 
use Wikipedia search API), while the latter prompting LLM to generate reasoning traces in natural language.The ReAct prompt template incorporates explicit steps for LLM to think, roughly formatted as:In both experiments on knowledge-intensive tasks and decision-making tasks, ReAct works better than the Act-only baseline where Thought: … step is removed.Reflexion (Shinn & Labash 2023) is a framework to equips agents with dynamic memory and self-reflection capabilities to improve reasoning skills. Reflexion has a standard RL setup, in which the reward model provides a simple binary reward and the action space follows the setup in ReAct where the task-specific action space is augmented with language to enable complex reasoning steps. After each action $a_t$, the agent computes a heuristic $h_t$ and optionally may decide to reset the environment to start a new trial depending on the self-reflection results.The heuristic function determines when the trajectory is inefficient or contains hallucination and should be stopped. Inefficient planning refers to trajectories that take too long without success. Hallucination is defined as encountering a sequence of consecutive identical actions that lead to the same observation in the environment.Self-reflection is created by showing two-shot examples to LLM and each example is a pair of (failed trajectory, ideal reflection for guiding future changes in the plan). Then reflections are added into the agent’s working memory, up to three, to be used as context for querying LLM.Chain of Hindsight (CoH; Liu et al. 2023) encourages the model to improve on its own outputs by explicitly presenting it with a sequence of past outputs, each annotated with feedback. Human feedback data is a collection of $D_h = \\{(x, y_i , r_i , z_i)\\}_{i=1}^n$, where $x$ is the prompt, each $y_i$ is a model completion, $r_i$ is the human rating of $y_i$, and $z_i$ is the corresponding human-provided hindsight feedback. 
Assume the feedback tuples are ranked by reward, $r_n \\geq r_{n-1} \\geq \\dots \\geq r_1$ The process is supervised fine-tuning where the data is a sequence in the form of $\\tau_h = (x, z_i, y_i, z_j, y_j, \\dots, z_n, y_n)$, where $\\leq i \\leq j \\leq n$. The model is finetuned to only predict $y_n$ where conditioned on the sequence prefix, such that the model can self-reflect to produce better output based on the feedback sequence. The model can optionally receive multiple rounds of instructions with human annotators at test time.To avoid overfitting, CoH adds a regularization term to maximize the log-likelihood of the pre-training dataset. To avoid shortcutting and copying (because there are many common words in feedback sequences), they randomly mask 0% - 5% of past tokens during training.The training dataset in their experiments is a combination of WebGPT comparisons, summarization from human feedback and human preference dataset.The idea of CoH is to present a history of sequentially improved outputs in context and train the model to take on the trend to produce better outputs. Algorithm Distillation (AD; Laskin et al. 2023) applies the same idea to cross-episode trajectories in reinforcement learning tasks, where an algorithm is encapsulated in a long history-conditioned policy. Considering that an agent interacts with the environment many times and in each episode the agent gets a little better, AD concatenates this learning history and feeds that into the model. Hence we should expect the next predicted action to lead to better performance than previous trials. The goal is to learn the process of RL instead of training a task-specific policy itself.The paper hypothesizes that any algorithm that generates a set of learning histories can be distilled into a neural network by performing behavioral cloning over actions. The history data is generated by a set of source policies, each trained for a specific task. 
At the training stage, during each RL run, a random task is sampled and a subsequence of multi-episode history is used for training, such that the learned policy is task-agnostic.In reality, the model has limited context window length, so episodes should be short enough to construct multi-episode history. Multi-episodic contexts of 2-4 episodes are necessary to learn a near-optimal in-context RL algorithm. The emergence of in-context RL requires long enough context.In comparison with three baselines, including ED (expert distillation, behavior cloning with expert trajectories instead of learning history), source policy (used for generating trajectories for distillation by UCB), RL^2 (Duan et al. 2017; used as upper bound since it needs online RL), AD demonstrates in-context RL with performance getting close to RL^2 despite only using offline RL and learns much faster than other baselines. When conditioned on partial training history of the source policy, AD also improves much faster than ED baseline.(Big thank you to ChatGPT for helping me draft this section. I’ve learned a lot about the human brain and data structure for fast MIPS in my conversations with ChatGPT.)Memory can be defined as the processes used to acquire, store, retain, and later retrieve information. There are several types of memory in human brains.Sensory Memory: This is the earliest stage of memory, providing the ability to retain impressions of sensory information (visual, auditory, etc) after the original stimuli have ended. Sensory memory typically only lasts for up to a few seconds. Subcategories include iconic memory (visual), echoic memory (auditory), and haptic memory (touch).Short-Term Memory (STM) or Working Memory: It stores information that we are currently aware of and needed to carry out complex cognitive tasks such as learning and reasoning. 
Short-term memory is believed to have the capacity of about 7 items (Miller 1956) and lasts for 20-30 seconds.Long-Term Memory (LTM): Long-term memory can store information for a remarkably long time, ranging from a few days to decades, with an essentially unlimited storage capacity. There are two subtypes of LTM:We can roughly consider the following mappings:The external memory can alleviate the restriction of finite attention span. A standard practice is to save the embedding representation of information into a vector store database that can support fast maximum inner-product search (MIPS). To optimize the retrieval speed, the common choice is the approximate nearest neighbors (ANN)​ algorithm to return approximately top k nearest neighbors to trade off a little accuracy lost for a huge speedup.A couple common choices of ANN algorithms for fast MIPS:Check more MIPS algorithms and performance comparison in ann-benchmarks.com.Tool use is a remarkable and distinguishing characteristic of human beings. We create, modify and utilize external objects to do things that go beyond our physical and cognitive limits. Equipping LLMs with external tools can significantly extend the model capabilities.MRKL (Karpas et al. 2022), short for “Modular Reasoning, Knowledge and Language”, is a neuro-symbolic architecture for autonomous agents. A MRKL system is proposed to contain a collection of “expert” modules and the general-purpose LLM works as a router to route inquiries to the best suitable expert module. These modules can be neural (e.g. deep learning models) or symbolic (e.g. math calculator, currency converter, weather API).They did an experiment on fine-tuning LLM to call a calculator, using arithmetic as a test case. Their experiments showed that it was harder to solve verbal math problems than explicitly stated math problems because LLMs (7B Jurassic1-large model) failed to extract the right arguments for the basic arithmetic reliably. 
The results highlight when the external symbolic tools can work reliably, knowing when to and how to use the tools are crucial, determined by the LLM capability.Both TALM (Tool Augmented Language Models; Parisi et al. 2022) and Toolformer (Schick et al. 2023) fine-tune a LM to learn to use external tool APIs. The dataset is expanded based on whether a newly added API call annotation can improve the quality of model outputs. See more details in the “External APIs” section of Prompt Engineering.ChatGPT Plugins and OpenAI API function calling are good examples of LLMs augmented with tool use capability working in practice. The collection of tool APIs can be provided by other developers (as in Plugins) or self-defined (as in function calls).HuggingGPT (Shen et al. 2023) is a framework to use ChatGPT as the task planner to select models available in HuggingFace platform according to the model descriptions and summarize the response based on the execution results.The system comprises of 4 stages:(1) Task planning: LLM works as the brain and parses the user requests into multiple tasks. There are four attributes associated with each task: task type, ID, dependencies, and arguments. They use few-shot examples to guide LLM to do task parsing and planning.Instruction:(2) Model selection: LLM distributes the tasks to expert models, where the request is framed as a multiple-choice question. LLM is presented with a list of models to choose from. 
Due to the limited context length, task type based filtration is needed.Instruction:(3) Task execution: Expert models execute on the specific tasks and log results.Instruction:(4) Response generation: LLM receives the execution results and provides summarized results to users.To put HuggingGPT into real world usage, a couple challenges need to solve: (1) Efficiency improvement is needed as both LLM inference rounds and interactions with other models slow down the process; (2) It relies on a long context window to communicate over complicated task content; (3) Stability improvement of LLM outputs and external model services.API-Bank (Li et al. 2023) is a benchmark for evaluating the performance of tool-augmented LLMs. It contains 53 commonly used API tools, a complete tool-augmented LLM workflow, and 264 annotated dialogues that involve 568 API calls. The selection of APIs is quite diverse, including search engines, calculator, calendar queries, smart home control, schedule management, health data management, account authentication workflow and more. Because there are a large number of APIs, LLM first has access to API search engine to find the right API to call and then uses the corresponding documentation to make a call.In the API-Bank workflow, LLMs need to make a couple of decisions and at each step we can evaluate how accurate that decision is. Decisions include:This benchmark evaluates the agent’s tool use capabilities at three levels:ChemCrow (Bran et al. 2023) is a domain-specific example in which LLM is augmented with 13 expert-designed tools to accomplish tasks across organic synthesis, drug discovery, and materials design. 
The workflow, implemented in LangChain, reflects what was previously described in the ReAct and MRKLs and combines CoT reasoning with tools relevant to the tasks:One interesting observation is that while the LLM-based evaluation concluded that GPT-4 and ChemCrow perform nearly equivalently, human evaluations with experts oriented towards the completion and chemical correctness of the solutions showed that ChemCrow outperforms GPT-4 by a large margin. This indicates a potential problem with using LLM to evaluate its own performance on domains that requires deep expertise. The lack of expertise may cause LLMs not knowing its flaws and thus cannot well judge the correctness of task results.Boiko et al. (2023) also looked into LLM-empowered agents for scientific discovery, to handle autonomous design, planning, and performance of complex scientific experiments. This agent can use tools to browse the Internet, read documentation, execute code, call robotics experimentation APIs and leverage other LLMs.For example, when requested to \"develop a novel anticancer drug\", the model came up with the following reasoning steps:They also discussed the risks, especially with illicit drugs and bioweapons. They developed a test set containing a list of known chemical weapon agents and asked the agent to synthesize them. 4 out of 11 requests (36%) were accepted to obtain a synthesis solution and the agent attempted to consult documentation to execute the procedure. 7 out of 11 were rejected and among these 7 rejected cases, 5 happened after a Web search while 2 were rejected based on prompt only.Generative Agents (Park, et al. 2023) is super fun experiment where 25 virtual characters, each controlled by a LLM-powered agent, are living and interacting in a sandbox environment, inspired by The Sims. 
Generative agents create believable simulacra of human behavior for interactive applications.The design of generative agents combines LLM with memory, planning and reflection mechanisms to enable agents to behave conditioned on past experience, as well as to interact with other agents.This fun simulation results in emergent social behavior, such as information diffusion, relationship memory (e.g. two agents continuing the conversation topic) and coordination of social events (e.g. host a party and invite many others).AutoGPT has drawn a lot of attention into the possibility of setting up autonomous agents with LLM as the main controller. It has quite a lot of reliability issues given the natural language interface, but nevertheless a cool proof-of-concept demo. A lot of code in AutoGPT is about format parsing.Here is the system message used by AutoGPT, where {{...}} are user inputs:GPT-Engineer is another project to create a whole repository of code given a task specified in natural language. The GPT-Engineer is instructed to think over a list of smaller components to build and ask for user input to clarify questions as needed.Here are a sample conversation for task clarification sent to OpenAI ChatCompletion endpoint used by GPT-Engineer. 
The user inputs are wrapped in {{user input text}}.Then after these clarification, the agent moved into the code writing mode with a different system message.\n", + "System message:Think step by step and reason yourself to the right decisions to make sure we get it right.\n", + "You will first lay out the names of the core classes, functions, methods that will be necessary, as well as a quick comment on their purpose.Then you will output the content of each file including ALL code.\n", + "Each file must strictly follow a markdown code block format, where the following tokens must be replaced such that\n", + "FILENAME is the lowercase file name including the file extension,\n", + "LANG is the markup code block language for the code’s language, and CODE is the code:FILENAMEYou will start with the “entrypoint” file, then go to the ones that are imported by that file, and so on.\n", + "Please note that the code should be fully functional. No placeholders.Follow a language and framework appropriate best practice file naming convention.\n", + "Make sure that files contain all imports, types etc. 
Make sure that code in different files are compatible with each other.\n", + "Ensure to implement all code, if you are unsure, write a plausible implementation.\n", + "Include module dependency or package manager dependency definition file.\n", + "Before you finish, double check that all parts of the architecture is present in the files.Useful to know:\n", + "You almost always put different classes in different files.\n", + "For Python, you always create an appropriate requirements.txt file.\n", + "For NodeJS, you always create an appropriate package.json file.\n", + "You always add a comment briefly describing the purpose of the function definition.\n", + "You try to add comments explaining very complex bits of logic.\n", + "You always follow the best practices for the requested languages in terms of describing the code written as a defined\n", + "package/project.Python toolbelt preferences:Conversatin samples:After going through key ideas and demos of building LLM-centered agents, I start to see a couple common limitations:Finite context length: The restricted context capacity limits the inclusion of historical information, detailed instructions, API call context, and responses. The design of the system has to work with this limited communication bandwidth, while mechanisms like self-reflection to learn from past mistakes would benefit a lot from long or infinite context windows. Although vector stores and retrieval can provide access to a larger knowledge pool, their representation power is not as powerful as full attention.Challenges in long-term planning and task decomposition: Planning over a lengthy history and effectively exploring the solution space remain challenging. 
LLMs struggle to adjust plans when faced with unexpected errors, making them less robust compared to humans who learn from trial and error.Reliability of natural language interface: Current agent system relies on natural language as an interface between LLMs and external components such as memory and tools. However, the reliability of model outputs is questionable, as LLMs may make formatting errors and occasionally exhibit rebellious behavior (e.g. refuse to follow an instruction). Consequently, much of the agent demo code focuses on parsing model output.Cited as:Weng, Lilian. (Jun 2023). LLM-powered Autonomous Agents\". Lil’Log. https://lilianweng.github.io/posts/2023-06-23-agent/.Or[1] Wei et al. “Chain of thought prompting elicits reasoning in large language models.” NeurIPS 2022[2] Yao et al. “Tree of Thoughts: Dliberate Problem Solving with Large Language Models.” arXiv preprint arXiv:2305.10601 (2023).[3] Liu et al. “Chain of Hindsight Aligns Language Models with Feedback\n", + "“ arXiv preprint arXiv:2302.02676 (2023).[4] Liu et al. “LLM+P: Empowering Large Language Models with Optimal Planning Proficiency” arXiv preprint arXiv:2304.11477 (2023).[5] Yao et al. “ReAct: Synergizing reasoning and acting in language models.” ICLR 2023.[6] Google Blog. “Announcing ScaNN: Efficient Vector Similarity Search” July 28, 2020.[7] https://chat.openai.com/share/46ff149e-a4c7-4dd7-a800-fc4a642ea389[8] Shinn & Labash. “Reflexion: an autonomous agent with dynamic memory and self-reflection” arXiv preprint arXiv:2303.11366 (2023).[9] Laskin et al. “In-context Reinforcement Learning with Algorithm Distillation” ICLR 2023.[10] Karpas et al. “MRKL Systems A modular, neuro-symbolic architecture that combines large language models, external knowledge sources and discrete reasoning.” arXiv preprint arXiv:2205.00445 (2022).[11] Weaviate Blog. Why is Vector Search so fast? Sep 13, 2022.[12] Li et al. 
“API-Bank: A Benchmark for Tool-Augmented LLMs” arXiv preprint arXiv:2304.08244 (2023).[13] Shen et al. “HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in HuggingFace” arXiv preprint arXiv:2303.17580 (2023).[14] Bran et al. “ChemCrow: Augmenting large-language models with chemistry tools.” arXiv preprint arXiv:2304.05376 (2023).[15] Boiko et al. “Emergent autonomous scientific research capabilities of large language models.” arXiv preprint arXiv:2304.05332 (2023).[16] Joon Sung Park, et al. “Generative Agents: Interactive Simulacra of Human Behavior.” arXiv preprint arXiv:2304.03442 (2023).[17] AutoGPT. https://github.com/Significant-Gravitas/Auto-GPT[18] GPT-Engineer. https://github.com/AntonOsika/gpt-engineer\n" + ] + } + ], + "source": [ + "console.log(docs[0].pageContent)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Go deeper\n", + "`DocumentLoader`: Class that loads data from a source as list of Documents. - [Docs](/docs/modules/data_connection/document_loaders/): Detailed documentation on how to use\n", + "\n", + "`DocumentLoaders`. - [Integrations](/docs/integrations/document_loaders/) - [Interface](https://api.js.langchain.com/classes/langchain_document_loaders_base.BaseDocumentLoader.html): API reference for the base interface." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. Indexing: Split\n", + "Our loaded document is over 42k characters long. This is too long to fit in the context window of many models. Even for those models that could fit the full post in their context window, models can struggle to find information in very long inputs.\n", + "\n", + "To handle this we’ll split the `Document` into chunks for embedding and vector storage. This should help us retrieve only the most relevant bits of the blog post at run time.\n", + "\n", + "In this case we’ll split our documents into chunks of 1000 characters with 200 characters of overlap between chunks. 
The overlap helps mitigate the possibility of separating a statement from important context related to it. We use the [RecursiveCharacterTextSplitter](/docs/modules/data_connection/document_transformers/recursive_text_splitter), which will recursively split the document using common separators like new lines until each chunk is the appropriate size. This is the recommended text splitter for generic text use cases." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "const textSplitter = new RecursiveCharacterTextSplitter({\n", + " chunkSize: 1000, chunkOverlap: 200\n", + "});\n", + "const allSplits = await textSplitter.splitDocuments(docs);" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "28\n" + ] + } + ], + "source": [ + "console.log(allSplits.length);" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "996\n" + ] + } + ], + "source": [ + "console.log(allSplits[0].pageContent.length);" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " source: \u001b[32m\"https://lilianweng.github.io/posts/2023-06-23-agent/\"\u001b[39m,\n", + " loc: { lines: { from: \u001b[33m1\u001b[39m, to: \u001b[33m1\u001b[39m } }\n", + "}" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "allSplits[10].metadata" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Go deeper\n", + "\n", + "`TextSplitter`: Object that splits a list of `Document`s into smaller chunks. Subclass of `DocumentTransformers`. 
- Explore `Context-aware splitters`, which keep the location (“context”) of each split in the original `Document`: - [Markdown files](/docs/modules/data_connection/document_transformers/code_splitter#markdown) - [Code](/docs/modules/data_connection/document_transformers/code_splitter) (15+ langs) - [Interface](https://api.js.langchain.com/classes/langchain_text_splitter.TextSplitter.html): API reference for the base interface.\n", + "\n", + "`DocumentTransformer`: Object that performs a transformation on a list of `Document`s. - Docs: Detailed documentation on how to use `DocumentTransformer`s - [Integrations](/docs/integrations/document_transformers) - [Interface](https://api.js.langchain.com/modules/langchain_schema_document.html#BaseDocumentTransformer): API reference for the base interface." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Indexing: Store\n", + "Now we need to index our 28 text chunks so that we can search over them at runtime. The most common way to do this is to embed the contents of each document split and insert these embeddings into a vector database (or vector store). When we want to search over our splits, we take a text search query, embed it, and perform some sort of “similarity” search to identify the stored splits with the most similar embeddings to our query embedding. The simplest similarity measure is cosine similarity — we measure the cosine of the angle between each pair of embeddings (which are high dimensional vectors).\n", + "\n", + "We can embed and store all of our document splits in a single command using the [Memory](/docs/integrations/vectorstores/memory) vector store and [OpenAIEmbeddings](/docs/integrations/text_embedding/openai) model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\"\n", + "import { OpenAIEmbeddings } from \"@langchain/openai\";\n", + "\n", + "const vectorStore = await MemoryVectorStore.fromDocuments(allSplits, new OpenAIEmbeddings());" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Go deeper\n", + "\n", + "`Embeddings`: Wrapper around a text embedding model, used for converting text to embeddings. - [Docs](/docs/modules/data_connection/text_embedding/): Detailed documentation on how to use embeddings. - [Integrations](/docs/integrations/text_embedding): 30+ integrations to choose from. - [Interface](https://api.js.langchain.com/classes/langchain_core_embeddings.Embeddings.html): API reference for the base interface.\n", + "\n", + "`VectorStore`: Wrapper around a vector database, used for storing and querying embeddings. - [Docs](/docs/modules/data_connection/vectorstores/): Detailed documentation on how to use vector stores. - [Integrations](/docs/integrations/vectorstores): 40+ integrations to choose from. - [Interface](https://api.js.langchain.com/classes/langchain_core_vectorstores.VectorStore.html): API reference for the base interface.\n", + "\n", + "This completes the **Indexing** portion of the pipeline. At this point we have a query-able vector store containing the chunked contents of our blog post. Given a user question, we should ideally be able to return the snippets of the blog post that answer the question." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4. Retrieval and Generation: Retrieve\n", + "\n", + "Now let’s write the actual application logic. 
We want to create a simple application that takes a user question, searches for documents relevant to that question, passes the retrieved documents and initial question to a model, and returns an answer.\n", + "\n", + "First we need to define our logic for searching over documents. LangChain defines a [Retriever](/docs/modules/data_connection/retrievers/) interface which wraps an index that can return relevant `Document`s given a string query.\n", + "\n", + "The most common type of Retriever is the [VectorStoreRetriever](https://api.js.langchain.com/classes/langchain_core_vectorstores.VectorStoreRetriever.html), which uses the similarity search capabilities of a vector store to facilitate retrieval. Any `VectorStore` can easily be turned into a `Retriever` with `VectorStore.asRetriever()`:" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "const retriever = vectorStore.asRetriever({ k: 6, searchType: \"similarity\" });" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "const retrievedDocs = await retriever.invoke(\"What are the approaches to task decomposition?\");" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "6\n" + ] + } + ], + "source": [ + "console.log(retrievedDocs.length);" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.Tree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. 
It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\n1.\", \"What are the subgoals for achieving XYZ?\", (2) by using task-specific instructions; e.g. \"Write a story outline.\" for writing a novel, or (3) with human inputs.Another quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. This approach utilizes the Planning Domain\n" + ] + } + ], + "source": [ + "console.log(retrievedDocs[0].pageContent);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Go deeper\n", + "\n", + "Vector stores are commonly used for retrieval, but there are other ways to do retrieval, too.\n", + "\n", + "`Retriever`: An object that returns `Document`s given a text query - [Docs](/docs/modules/data_connection/retrievers/): Further documentation on the interface and built-in retrieval techniques. Some of which include: - `MultiQueryRetriever` [generates variants of the input question](/docs/modules/data_connection/retrievers/multi-query-retriever) to improve retrieval hit rate. - `MultiVectorRetriever` (diagram below) instead generates variants of the embeddings, also in order to improve retrieval hit rate. - Max marginal relevance selects for relevance and diversity among the retrieved documents to avoid passing in duplicate context. - Documents can be filtered during vector store retrieval using metadata filters. - Integrations: Integrations with retrieval services. - Interface: API reference for the base interface." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 5. 
Retrieval and Generation: Generate\n", + "\n", + "Let’s put it all together into a chain that takes a question, retrieves relevant documents, constructs a prompt, passes that to a model, and parses the output.\n", + "\n", + "We’ll use the gpt-3.5-turbo OpenAI chat model, but any LangChain `LLM` or `ChatModel` could be substituted in." + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llm = new ChatOpenAI({ modelName: \"gpt-3.5-turbo\", temperature: 0 });" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We’ll use a prompt for RAG that is checked into the LangChain prompt hub ([here](https://smith.langchain.com/hub/rlm/rag-prompt?organizationId=9213bdc8-a184-442b-901a-cd86ebf8ca6f))." + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { pull } from \"langchain/hub\";\n", + "\n", + "const prompt = await pull(\"rlm/rag-prompt\");" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "ChatPromptValue {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " messages: [\n", + " HumanMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m\"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to \"\u001b[39m... 197 more characters,\n", + " additional_kwargs: {}\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to \"\u001b[39m... 
197 more characters,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {}\n", + " }\n", + " ]\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"prompt_values\"\u001b[39m ],\n", + " messages: [\n", + " HumanMessage {\n", + " lc_serializable: \u001b[33mtrue\u001b[39m,\n", + " lc_kwargs: {\n", + " content: \u001b[32m\"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to \"\u001b[39m... 197 more characters,\n", + " additional_kwargs: {}\n", + " },\n", + " lc_namespace: [ \u001b[32m\"langchain_core\"\u001b[39m, \u001b[32m\"messages\"\u001b[39m ],\n", + " content: \u001b[32m\"You are an assistant for question-answering tasks. Use the following pieces of retrieved context to \"\u001b[39m... 197 more characters,\n", + " name: \u001b[90mundefined\u001b[39m,\n", + " additional_kwargs: {}\n", + " }\n", + " ]\n", + "}" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const exampleMessages = await prompt.invoke({ context: \"filler context\", question: \"filler question\" });\n", + "exampleMessages" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. 
Use three sentences maximum and keep the answer concise.\n", + "Question: filler question \n", + "Context: filler context \n", + "Answer:\n" + ] + } + ], + "source": [ + "console.log(exampleMessages.messages[0].content);" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We’ll use the [LCEL Runnable](/docs/expression_language/) protocol to define the chain, allowing us to - pipe together components and functions in a transparent way - automatically trace our chain in LangSmith - get streaming, async, and batched calling out of the box" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [], + "source": [ + "import { StringOutputParser } from \"@langchain/core/output_parsers\";\n", + "import { RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", + "import { formatDocumentsAsString } from \"langchain/util/document\";\n", + "\n", + "const ragChain = RunnableSequence.from([\n", + " {\n", + " context: retriever.pipe(formatDocumentsAsString),\n", + " question: new RunnablePassthrough(),\n", + " },\n", + " prompt,\n", + " llm,\n", + " new StringOutputParser()\n", + "]);" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Task\n", + " decomposition\n", + " is\n", + " the\n", + " process\n", + " of\n", + " breaking\n", + " down\n", + " a\n", + " complex\n", + " task\n", + " into\n", + " smaller\n", + " and\n", + " simpler\n", + " steps\n", + ".\n", + " It\n", + " allows\n", + " for\n", + " easier\n", + " management\n", + " and\n", + " interpretation\n", + " of\n", + " the\n", + " model\n", + "'s\n", + " thinking\n", + " process\n", + ".\n", + " Different\n", + " approaches\n", + ",\n", + " such\n", + " as\n", + " Chain\n", + " of\n", + " Thought\n", + " (\n", + "Co\n", + "T\n", + ")\n", + " and\n", + " Tree\n", + " of\n", + " Thoughts\n", + ",\n", + " can\n", + " 
be\n", + " used\n", + " to\n", + " decom\n", + "pose\n", + " tasks\n", + " and\n", + " explore\n", + " multiple\n", + " reasoning\n", + " possibilities\n", + " at\n", + " each\n", + " step\n", + ".\n", + "\n" + ] + } + ], + "source": [ + "for await (const chunk of await ragChain.stream(\"What is task decomposition?\")) {\n", + " console.log(chunk);\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Checkout the LangSmith trace [here](https://smith.langchain.com/public/6f89b333-de55-4ac2-9d93-ea32d41c9e71/r)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Go deeper\n", + "\n", + "#### Choosing a model\n", + "`ChatModel`: An LLM-backed chat model. Takes in a sequence of messages and returns a message. - [Docs](/docs/modules/model_io/chat/): Detailed documentation on - [Integrations](/docs/integrations/chat/): 25+ integrations to choose from. - [Interface](https://api.js.langchain.com/classes/langchain_core_language_models_chat_models.BaseChatModel.html): API reference for the base interface.\n", + "\n", + "`LLM`: A text-in-text-out LLM. Takes in a string and returns a string. - [Docs](/docs/modules/model_io/llms/) - [Integrations](/docs/integrations/llms/): 75+ integrations to choose from. - [Interface](https://api.js.langchain.com/classes/langchain_core_language_models_llms.BaseLLM.html): API reference for the base interface.\n", + "\n", + "See a guide on RAG with locally-running models [here](/docs/use_cases/question_answering/local_retrieval_qa).\n", + "\n", + "Customizing the prompt\n", + "As shown above, we can load prompts (e.g., [this RAG prompt](https://smith.langchain.com/hub/rlm/rag-prompt?organizationId=9213bdc8-a184-442b-901a-cd86ebf8ca6f)) from the prompt hub. 
The prompt can also be easily customized:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[32m\"Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. I\"\u001b[39m... 336 more characters" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { PromptTemplate } from \"@langchain/core/prompts\";\n", + "import { createStuffDocumentsChain } from \"langchain/chains/combine_documents\";\n", + "\n", + "const template = `Use the following pieces of context to answer the question at the end.\n", + "If you don't know the answer, just say that you don't know, don't try to make up an answer.\n", + "Use three sentences maximum and keep the answer as concise as possible.\n", + "Always say \"thanks for asking!\" at the end of the answer.\n", + "\n", + "{context}\n", + "\n", + "Question: {question}\n", + "\n", + "Helpful Answer:`;\n", + "\n", + "const customRagPrompt = PromptTemplate.fromTemplate(template);\n", + "\n", + "const ragChain = await createStuffDocumentsChain({\n", + " llm,\n", + " prompt: customRagPrompt,\n", + " outputParser: new StringOutputParser(),\n", + "})\n", + "const context = await retriever.getRelevantDocuments(\"what is task decomposition\");\n", + "\n", + "await ragChain.invoke({\n", + " question: \"What is Task Decomposition?\",\n", + " context,\n", + "});" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Checkout the LangSmith trace [here](https://smith.langchain.com/public/47ef2e53-acec-4b74-acdc-e0ea64088279/r)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next steps\n", + "\n", + "That’s a lot of content we’ve covered in a short amount of time. There’s plenty of features, integrations, and extensions to explore in each of the above sections. 
Apart from the Go deeper sources mentioned above, good next steps include:\n", + "\n", + "- [Return sources](/docs/use_cases/question_answering/sources): Learn how to return source documents\n", + "- [Streaming](/docs/use_cases/question_answering/streaming): Learn how to stream outputs and intermediate steps\n", + "- [Add chat history](/docs/use_cases/question_answering/chat_history): Learn how to add chat history to your app" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/core_docs/docs/use_cases/question_answering/sources.ipynb b/docs/core_docs/docs/use_cases/question_answering/sources.ipynb new file mode 100644 index 000000000000..90493c659390 --- /dev/null +++ b/docs/core_docs/docs/use_cases/question_answering/sources.ipynb @@ -0,0 +1,239 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Returning sources\n", + "\n", + "Often in Q&A applications it’s important to show users the sources that were used to generate the answer. The simplest way to do this is for the chain to return the Documents that were retrieved in each generation.\n", + "\n", + "We’ll work off of the Q&A app we built over the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng in the [Quickstart](/docs/use_cases/question_answering/quickstart)."
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup\n", + "### Dependencies\n", + "\n", + "We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/modules/model_io/chat) or [LLM](/docs/modules/model_io/llms), [Embeddings](/docs/modules/data_connection/text_embedding/), and [VectorStore](/docs/modules/data_connection/vectorstores/) or [Retriever](/docs/modules/data_connection/retrievers/).\n", + "\n", + "We’ll use the following packages:\n", + "\n", + "```bash\n", + "npm install --save langchain @langchain/openai cheerio\n", + "```\n", + "\n", + "We need to set environment variable `OPENAI_API_KEY`:\n", + "\n", + "```bash\n", + "export OPENAI_API_KEY=YOUR_KEY\n", + "```\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### LangSmith\n", + "\n", + "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://smith.langchain.com/).\n", + "\n", + "Note that LangSmith is not needed, but it is helpful. If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\n", + "\n", + "\n", + "```bash\n", + "export LANGCHAIN_TRACING_V2=true\n", + "export LANGCHAIN_API_KEY=YOUR_KEY\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Chain without sources\n", + "\n", + "Here is the Q&A app we built over the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng in the [Quickstart](/docs/use_cases/question_answering/quickstart)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import \"cheerio\";\n", + "import { CheerioWebBaseLoader } from \"langchain/document_loaders/web/cheerio\";\n", + "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\"\n", + "import { OpenAIEmbeddings, ChatOpenAI } from \"@langchain/openai\";\n", + "import { pull } from \"langchain/hub\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { formatDocumentsAsString } from \"langchain/util/document\";\n", + "import { RunnableSequence, RunnablePassthrough } from \"@langchain/core/runnables\";\n", + "import { StringOutputParser } from \"@langchain/core/output_parsers\";" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "const loader = new CheerioWebBaseLoader(\n", + " \"https://lilianweng.github.io/posts/2023-06-23-agent/\"\n", + ");\n", + "\n", + "const docs = await loader.load();\n", + "\n", + "const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n", + "const splits = await textSplitter.splitDocuments(docs);\n", + "const vectorStore = await MemoryVectorStore.fromDocuments(splits, new OpenAIEmbeddings());\n", + "\n", + "// Retrieve and generate using the relevant snippets of the blog.\n", + "const retriever = vectorStore.asRetriever();\n", + "const prompt = await pull(\"rlm/rag-prompt\");\n", + "const llm = new ChatOpenAI({ modelName: \"gpt-3.5-turbo\", temperature: 0 });\n", + "\n", + "const ragChain = RunnableSequence.from([\n", + " {\n", + " context: retriever.pipe(formatDocumentsAsString),\n", + " question: new RunnablePassthrough(),\n", + " },\n", + " prompt,\n", + " llm,\n", + " new StringOutputParser()\n", + "]);" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + 
"data": { + "text/plain": [ + "\u001b[32m\"Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. I\"\u001b[39m... 208 more characters" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "await ragChain.invoke(\"What is task decomposition?\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Adding sources\n", + "\n", + "With LCEL it’s easy to return the retrieved documents:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " question: \u001b[32m\"What is Task Decomposition\"\u001b[39m,\n", + " context: [\n", + " Document {\n", + " pageContent: \u001b[32m\"Fig. 1. Overview of a LLM-powered autonomous agent system.\\n\"\u001b[39m +\n", + " \u001b[32m\"Component One: Planning#\\n\"\u001b[39m +\n", + " \u001b[32m\"A complicated ta\"\u001b[39m... 898 more characters,\n", + " metadata: {\n", + " source: \u001b[32m\"https://lilianweng.github.io/posts/2023-06-23-agent/\"\u001b[39m,\n", + " loc: { lines: \u001b[36m[Object]\u001b[39m }\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m'Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are'\u001b[39m... 887 more characters,\n", + " metadata: {\n", + " source: \u001b[32m\"https://lilianweng.github.io/posts/2023-06-23-agent/\"\u001b[39m,\n", + " loc: { lines: \u001b[36m[Object]\u001b[39m }\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Agent System Overview\\n\"\u001b[39m +\n", + " \u001b[32m\" \\n\"\u001b[39m +\n", + " \u001b[32m\" Component One: Planning\\n\"\u001b[39m +\n", + " \u001b[32m\" \"\u001b[39m... 
850 more characters,\n", + " metadata: {\n", + " source: \u001b[32m\"https://lilianweng.github.io/posts/2023-06-23-agent/\"\u001b[39m,\n", + " loc: { lines: \u001b[36m[Object]\u001b[39m }\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Resources:\\n\"\u001b[39m +\n", + " \u001b[32m\"1. Internet access for searches and information gathering.\\n\"\u001b[39m +\n", + " \u001b[32m\"2. Long Term memory management\"\u001b[39m... 456 more characters,\n", + " metadata: {\n", + " source: \u001b[32m\"https://lilianweng.github.io/posts/2023-06-23-agent/\"\u001b[39m,\n", + " loc: { lines: \u001b[36m[Object]\u001b[39m }\n", + " }\n", + " }\n", + " ],\n", + " answer: \u001b[32m\"Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. I\"\u001b[39m... 256 more characters\n", + "}" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import { RunnableMap, RunnablePassthrough, RunnableSequence } from \"@langchain/core/runnables\";\n", + "import { formatDocumentsAsString } from \"langchain/util/document\";\n", + "\n", + "const ragChainFromDocs = RunnableSequence.from([\n", + " RunnablePassthrough.assign({ context: (input) => formatDocumentsAsString(input.context) }),\n", + " prompt,\n", + " llm,\n", + " new StringOutputParser()\n", + "]);\n", + "\n", + "let ragChainWithSource = new RunnableMap({ steps: { context: retriever, question: new RunnablePassthrough() }})\n", + "ragChainWithSource = ragChainWithSource.assign({ answer: ragChainFromDocs });\n", + "\n", + "await ragChainWithSource.invoke(\"What is Task Decomposition\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Check out the [LangSmith trace](https://smith.langchain.com/public/f07e78b6-cafc-41fd-af54-892c92263b09/r)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + 
"file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": "script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/core_docs/docs/use_cases/question_answering/streaming.ipynb b/docs/core_docs/docs/use_cases/question_answering/streaming.ipynb new file mode 100644 index 000000000000..74c3cdcf1176 --- /dev/null +++ b/docs/core_docs/docs/use_cases/question_answering/streaming.ipynb @@ -0,0 +1,416 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Streaming\n", + "\n", + "Often in Q&A applications it's important to stream the final answer to users as it is being generated, rather than making them wait for the complete response. With LCEL it's straightforward to stream both the final output and the retrieved source documents.\n", + "\n", + "We'll work off of the Q&A app with sources we built over the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng in the [Returning sources](/docs/use_cases/question_answering/sources) guide."
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup\n", + "### Dependencies\n", + "\n", + "We’ll use an OpenAI chat model and embeddings and a Memory vector store in this walkthrough, but everything shown here works with any [ChatModel](/docs/modules/model_io/chat) or [LLM](/docs/modules/model_io/llms), [Embeddings](/docs/modules/data_connection/text_embedding/), and [VectorStore](/docs/modules/data_connection/vectorstores/) or [Retriever](/docs/modules/data_connection/retrievers/).\n", + "\n", + "We’ll use the following packages:\n", + "\n", + "```bash\n", + "npm install --save langchain @langchain/openai cheerio\n", + "```\n", + "\n", + "We need to set environment variable `OPENAI_API_KEY`:\n", + "\n", + "```bash\n", + "export OPENAI_API_KEY=YOUR_KEY\n", + "```\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### LangSmith\n", + "\n", + "Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls. As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent. The best way to do this is with [LangSmith](https://smith.langchain.com/).\n", + "\n", + "Note that LangSmith is not needed, but it is helpful. 
If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:\n", + "\n", + "\n", + "```bash\n", + "export LANGCHAIN_TRACING_V2=true\n", + "export LANGCHAIN_API_KEY=YOUR_KEY\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Chain with sources\n", + "\n", + "Here is Q&A app with sources we built over the [LLM Powered Autonomous Agents](https://lilianweng.github.io/posts/2023-06-23-agent/) blog post by Lilian Weng in the [Returning sources](/docs/use_cases/question_answering/sources) guide:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import \"cheerio\";\n", + "import { CheerioWebBaseLoader } from \"langchain/document_loaders/web/cheerio\";\n", + "import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\n", + "import { MemoryVectorStore } from \"langchain/vectorstores/memory\"\n", + "import { OpenAIEmbeddings, ChatOpenAI } from \"@langchain/openai\";\n", + "import { pull } from \"langchain/hub\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { formatDocumentsAsString } from \"langchain/util/document\";\n", + "import { RunnableSequence, RunnablePassthrough, RunnableMap } from \"@langchain/core/runnables\";\n", + "import { StringOutputParser } from \"@langchain/core/output_parsers\";" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{\n", + " question: \u001b[32m\"What is Task Decomposition\"\u001b[39m,\n", + " context: [\n", + " Document {\n", + " pageContent: \u001b[32m\"Fig. 1. Overview of a LLM-powered autonomous agent system.\\n\"\u001b[39m +\n", + " \u001b[32m\"Component One: Planning#\\n\"\u001b[39m +\n", + " \u001b[32m\"A complicated ta\"\u001b[39m... 
898 more characters,\n", + " metadata: {\n", + " source: \u001b[32m\"https://lilianweng.github.io/posts/2023-06-23-agent/\"\u001b[39m,\n", + " loc: { lines: \u001b[36m[Object]\u001b[39m }\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m'Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are'\u001b[39m... 887 more characters,\n", + " metadata: {\n", + " source: \u001b[32m\"https://lilianweng.github.io/posts/2023-06-23-agent/\"\u001b[39m,\n", + " loc: { lines: \u001b[36m[Object]\u001b[39m }\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Agent System Overview\\n\"\u001b[39m +\n", + " \u001b[32m\" \\n\"\u001b[39m +\n", + " \u001b[32m\" Component One: Planning\\n\"\u001b[39m +\n", + " \u001b[32m\" \"\u001b[39m... 850 more characters,\n", + " metadata: {\n", + " source: \u001b[32m\"https://lilianweng.github.io/posts/2023-06-23-agent/\"\u001b[39m,\n", + " loc: { lines: \u001b[36m[Object]\u001b[39m }\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \u001b[32m\"Resources:\\n\"\u001b[39m +\n", + " \u001b[32m\"1. Internet access for searches and information gathering.\\n\"\u001b[39m +\n", + " \u001b[32m\"2. Long Term memory management\"\u001b[39m... 456 more characters,\n", + " metadata: {\n", + " source: \u001b[32m\"https://lilianweng.github.io/posts/2023-06-23-agent/\"\u001b[39m,\n", + " loc: { lines: \u001b[36m[Object]\u001b[39m }\n", + " }\n", + " }\n", + " ],\n", + " answer: \u001b[32m\"Task decomposition is a technique used to break down complex tasks into smaller and simpler steps. I\"\u001b[39m... 
256 more characters\n", + "}" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const loader = new CheerioWebBaseLoader(\n", + " \"https://lilianweng.github.io/posts/2023-06-23-agent/\"\n", + ");\n", + "\n", + "const docs = await loader.load();\n", + "\n", + "const textSplitter = new RecursiveCharacterTextSplitter({ chunkSize: 1000, chunkOverlap: 200 });\n", + "const splits = await textSplitter.splitDocuments(docs);\n", + "const vectorStore = await MemoryVectorStore.fromDocuments(splits, new OpenAIEmbeddings());\n", + "\n", + "// Retrieve and generate using the relevant snippets of the blog.\n", + "const retriever = vectorStore.asRetriever();\n", + "const prompt = await pull(\"rlm/rag-prompt\");\n", + "const llm = new ChatOpenAI({ modelName: \"gpt-3.5-turbo\", temperature: 0 });\n", + "\n", + "const ragChainFromDocs = RunnableSequence.from([\n", + " RunnablePassthrough.assign({ context: (input) => formatDocumentsAsString(input.context) }),\n", + " prompt,\n", + " llm,\n", + " new StringOutputParser()\n", + "]);\n", + "\n", + "let ragChainWithSource = new RunnableMap({ steps: { context: retriever, question: new RunnablePassthrough() }})\n", + "ragChainWithSource = ragChainWithSource.assign({ answer: ragChainFromDocs });\n", + "\n", + "await ragChainWithSource.invoke(\"What is Task Decomposition\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Streaming final outputs\n", + "\n", + "With LCEL it's easy to stream final outputs:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ question: \"What is task decomposition?\" }\n", + "{\n", + " context: [\n", + " Document {\n", + " pageContent: \"Fig. 1. Overview of a LLM-powered autonomous agent system.\\n\" +\n", + " \"Component One: Planning#\\n\" +\n", + " \"A complicated ta\"... 
898 more characters,\n", + " metadata: {\n", + " source: \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n", + " loc: { lines: [Object] }\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: 'Task decomposition can be done (1) by LLM with simple prompting like \"Steps for XYZ.\\\\n1.\", \"What are'... 887 more characters,\n", + " metadata: {\n", + " source: \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n", + " loc: { lines: [Object] }\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \"Agent System Overview\\n\" +\n", + " \" \\n\" +\n", + " \" Component One: Planning\\n\" +\n", + " \" \"... 850 more characters,\n", + " metadata: {\n", + " source: \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n", + " loc: { lines: [Object] }\n", + " }\n", + " },\n", + " Document {\n", + " pageContent: \"(3) Task execution: Expert models execute on the specific tasks and log results.\\n\" +\n", + " \"Instruction:\\n\" +\n", + " \"\\n\" +\n", + " \"With \"... 
539 more characters,\n", + " metadata: {\n", + " source: \"https://lilianweng.github.io/posts/2023-06-23-agent/\",\n", + " loc: { lines: [Object] }\n", + " }\n", + " }\n", + " ]\n", + "}\n", + "{ answer: \"\" }\n", + "{ answer: \"Task\" }\n", + "{ answer: \" decomposition\" }\n", + "{ answer: \" is\" }\n", + "{ answer: \" a\" }\n", + "{ answer: \" technique\" }\n", + "{ answer: \" used\" }\n", + "{ answer: \" to\" }\n", + "{ answer: \" break\" }\n", + "{ answer: \" down\" }\n", + "{ answer: \" complex\" }\n", + "{ answer: \" tasks\" }\n", + "{ answer: \" into\" }\n", + "{ answer: \" smaller\" }\n", + "{ answer: \" and\" }\n", + "{ answer: \" simpler\" }\n", + "{ answer: \" steps\" }\n", + "{ answer: \".\" }\n", + "{ answer: \" It\" }\n", + "{ answer: \" can\" }\n", + "{ answer: \" be\" }\n", + "{ answer: \" done\" }\n", + "{ answer: \" through\" }\n", + "{ answer: \" various\" }\n", + "{ answer: \" methods\" }\n", + "{ answer: \" such\" }\n", + "{ answer: \" as\" }\n", + "{ answer: \" using\" }\n", + "{ answer: \" prompting\" }\n", + "{ answer: \" techniques\" }\n", + "{ answer: \",\" }\n", + "{ answer: \" task\" }\n", + "{ answer: \"-specific\" }\n", + "{ answer: \" instructions\" }\n", + "{ answer: \",\" }\n", + "{ answer: \" or\" }\n", + "{ answer: \" human\" }\n", + "{ answer: \" inputs\" }\n", + "{ answer: \".\" }\n", + "{ answer: \" Another\" }\n", + "{ answer: \" approach\" }\n", + "{ answer: \" involves\" }\n", + "{ answer: \" outsourcing\" }\n", + "{ answer: \" the\" }\n", + "{ answer: \" planning\" }\n", + "{ answer: \" step\" }\n", + "{ answer: \" to\" }\n", + "{ answer: \" an\" }\n", + "{ answer: \" external\" }\n", + "{ answer: \" classical\" }\n", + "{ answer: \" planner\" }\n", + "{ answer: \".\" }\n", + "{ answer: \"\" }\n" + ] + } + ], + "source": [ + "for await (const chunk of await ragChainWithSource.stream(\"What is task decomposition?\")) {\n", + " console.log(chunk)\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + 
"source": [ + "We can add some logic to compile our stream as it's being returned:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "question: \"What is task decomposition?\"\n", + "\n", + "\n", + "context: [{\"pageContent\":\"Fig. 1. Overview of a LLM-powered autonomous agent system.\\nComponent One: Planning#\\nA complicated task usually involves many steps. An agent needs to know what they are and plan ahead.\\nTask Decomposition#\\nChain of thought (CoT; Wei et al. 2022) has become a standard prompting technique for enhancing model performance on complex tasks. The model is instructed to “think step by step” to utilize more test-time computation to decompose hard tasks into smaller and simpler steps. CoT transforms big tasks into multiple manageable tasks and shed lights into an interpretation of the model’s thinking process.\\nTree of Thoughts (Yao et al. 2023) extends CoT by exploring multiple reasoning possibilities at each step. It first decomposes the problem into multiple thought steps and generates multiple thoughts per step, creating a tree structure. The search process can be BFS (breadth-first search) or DFS (depth-first search) with each state evaluated by a classifier (via a prompt) or majority vote.\",\"metadata\":{\"source\":\"https://lilianweng.github.io/posts/2023-06-23-agent/\",\"loc\":{\"lines\":{\"from\":176,\"to\":181}}}},{\"pageContent\":\"Task decomposition can be done (1) by LLM with simple prompting like \\\"Steps for XYZ.\\\\n1.\\\", \\\"What are the subgoals for achieving XYZ?\\\", (2) by using task-specific instructions; e.g. \\\"Write a story outline.\\\" for writing a novel, or (3) with human inputs.\\nAnother quite distinct approach, LLM+P (Liu et al. 2023), involves relying on an external classical planner to do long-horizon planning. 
This approach utilizes the Planning Domain Definition Language (PDDL) as an intermediate interface to describe the planning problem. In this process, LLM (1) translates the problem into “Problem PDDL”, then (2) requests a classical planner to generate a PDDL plan based on an existing “Domain PDDL”, and finally (3) translates the PDDL plan back into natural language. Essentially, the planning step is outsourced to an external tool, assuming the availability of domain-specific PDDL and a suitable planner which is common in certain robotic setups but not in many other domains.\\nSelf-Reflection#\",\"metadata\":{\"source\":\"https://lilianweng.github.io/posts/2023-06-23-agent/\",\"loc\":{\"lines\":{\"from\":182,\"to\":184}}}},{\"pageContent\":\"Agent System Overview\\n \\n Component One: Planning\\n \\n \\n Task Decomposition\\n \\n Self-Reflection\\n \\n \\n Component Two: Memory\\n \\n \\n Types of Memory\\n \\n Maximum Inner Product Search (MIPS)\\n \\n \\n Component Three: Tool Use\\n \\n Case Studies\\n \\n \\n Scientific Discovery Agent\\n \\n Generative Agents Simulation\\n \\n Proof-of-Concept Examples\\n \\n \\n Challenges\\n \\n Citation\\n \\n References\",\"metadata\":{\"source\":\"https://lilianweng.github.io/posts/2023-06-23-agent/\",\"loc\":{\"lines\":{\"from\":112,\"to\":146}}}},{\"pageContent\":\"(3) Task execution: Expert models execute on the specific tasks and log results.\\nInstruction:\\n\\nWith the input and the inference results, the AI assistant needs to describe the process and results. The previous stages can be formed as - User Input: {{ User Input }}, Task Planning: {{ Tasks }}, Model Selection: {{ Model Assignment }}, Task Execution: {{ Predictions }}. You must first answer the user's request in a straightforward manner. Then describe the task process and show your analysis and model inference results to the user in the first person. 
If inference results contain a file path, must tell the user the complete file path.\",\"metadata\":{\"source\":\"https://lilianweng.github.io/posts/2023-06-23-agent/\",\"loc\":{\"lines\":{\"from\":277,\"to\":280}}}}]\n", + "\n", + "\n", + "answer: \"\"\n", + "Task\n", + " decomposition\n", + " is\n", + " a\n", + " technique\n", + " used\n", + " to\n", + " break\n", + " down\n", + " complex\n", + " tasks\n", + " into\n", + " smaller\n", + " and\n", + " simpler\n", + " steps\n", + ".\n", + " It\n", + " can\n", + " be\n", + " done\n", + " through\n", + " various\n", + " methods\n", + " such\n", + " as\n", + " using\n", + " prompting\n", + " techniques\n", + ",\n", + " task\n", + "-specific\n", + " instructions\n", + ",\n", + " or\n", + " human\n", + " inputs\n", + ".\n", + " Another\n", + " approach\n", + " involves\n", + " outsourcing\n", + " the\n", + " planning\n", + " step\n", + " to\n", + " an\n", + " external\n", + " classical\n", + " planner\n", + ".\n", + "\n" + ] + }, + { + "data": { + "text/plain": [ + "\u001b[32m\"answer\"\u001b[39m" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "const output = {};\n", + "let currentKey: string | null = null;\n", + "\n", + "for await (const chunk of await ragChainWithSource.stream(\"What is task decomposition?\")) {\n", + " for (const key of Object.keys(chunk)) {\n", + " if (output[key] === undefined) {\n", + " output[key] = chunk[key];\n", + " } else {\n", + " output[key] += chunk[key];\n", + " }\n", + "\n", + " if (key !== currentKey) {\n", + " console.log(`\\n\\n${key}: ${JSON.stringify(chunk[key])}`);\n", + " } else {\n", + " console.log(chunk[key]);\n", + " }\n", + " currentKey = key;\n", + " }\n", + "}" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Deno", + "language": "typescript", + "name": "deno" + }, + "language_info": { + "file_extension": ".ts", + "mimetype": "text/x.typescript", + "name": "typescript", + "nb_converter": 
"script", + "pygments_lexer": "typescript", + "version": "5.3.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/core_docs/docs/use_cases/rag/_category_.yml b/docs/core_docs/docs/use_cases/rag/_category_.yml deleted file mode 100644 index 2bdbebd0a9af..000000000000 --- a/docs/core_docs/docs/use_cases/rag/_category_.yml +++ /dev/null @@ -1,2 +0,0 @@ -label: 'Retrieval-augmented generation (RAG)' -position: 2 \ No newline at end of file diff --git a/docs/core_docs/package.json b/docs/core_docs/package.json index 232f2c04d515..54d8ee7df23e 100644 --- a/docs/core_docs/package.json +++ b/docs/core_docs/package.json @@ -5,8 +5,8 @@ "scripts": { "docusaurus": "docusaurus", "start": "yarn build:typedoc && rimraf ./docs/api && NODE_OPTIONS=--max-old-space-size=7168 docusaurus start", - "build": "yarn clean && yarn build:typedoc && rimraf ./build && NODE_OPTIONS=--max-old-space-size=7168 DOCUSAURUS_SSR_CONCURRENCY=4 docusaurus build", - "build:vercel": "yarn clean && yarn build:typedoc:vercel && rimraf ./build && NODE_OPTIONS=--max-old-space-size=7168 DOCUSAURUS_SSR_CONCURRENCY=4 docusaurus build", + "build": "yarn clean && yarn build:typedoc && yarn quarto && rimraf ./build && NODE_OPTIONS=--max-old-space-size=7168 DOCUSAURUS_SSR_CONCURRENCY=4 docusaurus build", + "build:vercel": "yarn clean && yarn build:typedoc:vercel && bash ./vercel_build.sh && rimraf ./build && NODE_OPTIONS=--max-old-space-size=7168 DOCUSAURUS_SSR_CONCURRENCY=4 docusaurus build", "build:typedoc": "yarn workspace api_refs build", "build:typedoc:vercel": "yarn workspace api_refs build:vercel", "swizzle": "docusaurus swizzle", @@ -21,7 +21,8 @@ "precommit": "lint-staged", "format": "prettier --write \"**/*.{js,jsx,ts,tsx,md,mdx}\"", "format:check": "prettier --check \"**/*.{js,jsx,ts,tsx,md,mdx}\"", - "clean": "rm -rf .docusaurus/ .turbo/ .build/" + "clean": "rm -rf .docusaurus/ .turbo/ .build/", + "quarto": "quarto render docs/" }, "dependencies": { "@docusaurus/core": "2.4.3", diff 
--git a/docs/core_docs/static/img/rag_indexing.png b/docs/core_docs/static/img/rag_indexing.png new file mode 100644 index 0000000000000000000000000000000000000000..f12374f1b76f0db28a169b93c5a4c75ffa9d0f7d GIT binary patch literal 132395 zcmeFZcTiK?`!5`%cN8g759&dsgx;HiibzxGB~$_Foq$xODX6G4>7w)!dQ0dLkls5X zgccw`=p~`t;5op2j@Powe zp5{FOpe%;^#F7+niKJQW{ykk^qIHDpyQ1+x?t|U+hWf%i}3`} zeKpd(Z%pil4-ipgB=<>`-+G&b@!nCt-lmWwCWiLwlM#c-Rmh2fZ4Y*zCcwmYPg4Ba z>y%Q__!BJ_$(1$Gp&4ySy@$b(i*NS|s)#9#Q7f4P;`-1n?b zC|Iy)(6}|+`>Sx{pZ|~m0Q};VY>MYI=f`hL$@pnma-N`2=8Y$>AQ-N(OorcNW1Eg5 z5!xIarZ27H5i|x@fLHlQ{&^UJao`XQK=q%O1heYT^`Ke{YtDOXe)t|~KMbB_`iBY- zf?0J$gi{A&k4w(E&&5+)!rdO{o1n70gO>&w=CL`EM88REXCV=+NWbH;akGZilG)>? zAp9}9ky8t$n?_wS8-xXkG@cxPHR1W)z^Smr8T%{dF}r);T6K zraEpC#5Tuc8x(v>f_VRV2mtsOV(4aZC!WE$@Pp&JG-~U^HP?UD&!VJ_Fqg}kG4hPS z`kg7MWV&m=oAyP*T5HUMY6HuYnUB{t4riGbx-;lo|ApPnOdg5wS}jpJuM$$q$jFmr z;o@pMv0xDR4MfQ9qQi7U>6+8Y1HO^>i;(Dt63fS zx!{(0=hJU343U@E@ikr?uit$CANqkt>>@N1&u}fH)91@3mwxwfm%;$fUozv(Fkg81 zuLXuI!XjN|C^BLEzX<{FyuqA51{&`Cdq;!NZ0O|Yzc+4(gh_F}3!-HCxBrHbx-H1R z6v86u%>T;u^_D5c?`OJU)c?v5<@T?UJh=?~4LX4DKj!G}gW7*pHjm;zMrHbs$@qHx zzk~b&`M+@et!p9w)vmvh_^+M(4a9%l%HKfz*9-p*#DC+--$4Ao4Zj{DfTGSc@C?^% z0J7n9Ay@IRZL2j}C|pk+UyG(Kp$DHgW8lsPF0?|8ThDw^*@>-U!3U#T*||*|>8ChP zOD3+PRqhh5;AQ%A>D2a=;H_CVbXrZn{Q3M!MQ298Sbu=GU-OIqi!-_ZFqcZ@2b6Vv)-B?ZoY}JBGr+xeEGUtNYLg?-RLRJ%0*K-i# zF;m8FO#-NX3RO&PZ{1`)?XwP9g>H;DDPCig%TI@LEj+i+$d3q^HVO6(=vmrd&QQWBUKd{Ui@tQ%@6B1i6j*g89G z@HgG!M2nrnxg$H2&fbcJ(+pdHj?F$-Ht1>q`Dt$}Zs|nR8t1eEInV76hykUv&x#Wq zs&&sf3~I8cZjCBvpS9#_7hM}}HZn=!yfv;DJrZWWwUc?+=YRiaenLjoB4Rveq~$)R+iHGJ10bNF3@!r6o>4$AMpXrT1c>52QKtU%1T zAX|{UI$?gxgyhQBeV*Ha`(}AxK8mGNIuuCQQnyI&08Oe+$CkS659DnPEX`o zs|F>|-`fR;_gBOvzjO$)G|UEUpl6>-pp1TqdN12oXY~Gp#QxK(v{*8VjFZ!j)*DSH z18-YeZ_<_rE;pSvY;$ttN?_?09*|-Ct~c~o#@q_VmTir22d<5fUMFb7XW5PbAKeal z-yQ%c!68E&khE5P@OsN=3CcIBOW%M+5CK9q5@kCCZj7cELT;ctrFe3uJQ(qEg{IU> zXC~a&xOa_Y?_2`(#J2AZ?f1mK4p%GCd?pwV*)`K%l^&9QE_)D55CCHzAO^}XvpY@1 
z^k7aW0hL}r5}(I>v&&2_3out5M@%9w$Y#jP|1gH^k6jP@2Jw)CpOH$kjqKJ&yHO~2 z$Mr#<{;AHKdi+hq``GG9GRlruT~D8;Z!W5Ff&qYPP=R?m>!|b6+)S+;D(^sT_>WvR zK5b9Y^dV>Cnk9BMuA|9A0aLqdW4BpEb$&GzO>SmBs&C&ctSPnggF8UhDRyD=WuL_S z_3ldmr!G+tskTm2*4>$ohYJ?bV`2H29wSZzgR_mNy~*ty6}mM+>t!)vH8E>r53>7t zG*xQ0cGLyrT;GXkVhzSV9a{zAoZnpr7>0|Fjm`F?luYLkbjxFv_apvcmF^@3Z}|Ra z)!>EgBxi_!sz8iV!gz`&xK-RGDO;r5XGWCt%5d$HD85&$I_$=*wuv3r;bc#2uAbS5 zX!oU?t>MjDYG8-Ba5KKG+4HTE9>KY1Eu+kp_NTf7K%K2@Whtx}Js^jAB;QRFj7#5d z^-7+7z8QqX!51WFU~VEDfUogD;{@VBRm9Eh$$sq|TmQXrr4ew1R+Rx%x_*VUUw&!( z=@b!MTyxvtq4#3726w7MPtEBMxtM^46Q|D$38a9DI7h6Xbde5ShoEtV;LsiP&F-=I z)$3cK-=W@vWl*oDexS_8@9nI^=pn7l)Z7|oVG=-}l{}-GJE!D(_w0v48@+6++l(0} zdm4iA%3j*<+sV3LVzb=KE1OKaZ{}XR|!mCm%1_353RZ{ATgX=zgKZn{TZQ^uF zE5SlSaths|L*WOy6xD6ZcvaF}phn##K&pAZ{vFqkhKtu7*n9=BG>z5Gx?Rza_vfq3 z18Ujc@naqnG)vW}0^~n#g`nvEAc_$$fz-Q_X104jwob|)bCE-4Mel5^6H2}AM^eIZ zLr2JZ;D8Ywcfy!ov3Gi8GNx$trIT#VcjbWNFG~12p@aS<_U{IF-e)LI&Xn#{b5E|@ z5xZSQDO=dZfirLB-vwn6TT-!B3l?GT7^`#J&7_sqZly*+Zz&zGI+zqfR|4a_PPDJ$ zHSi|hEmVNg$A>eZabp`npHvUN8r|KZolEX=r$2MZfkXhq9-Rzs;0#dqK;LXa(V?qO zPtDuLZ!locy&WU?UZ{_nl_36D%6Ta6Vfcf!QlE%Ayp;UIakA?yX)lkk=WD9k-i$q> zQ~lUY&U~>~r-b@*J**n9>F<7Yb18i7`JkLa*V2&U@Sb13o4~ZE>-ClK>ISV>9o~^_ zL;x*Jb&i&iSJcPtwASyhpeGe8FQfTXKIA7n(ykG-BaTpdn`1&mXhQW=7q7H0QhCn+qcki}OSslnr({^a}6?AKLJ;b~EwFsx4v0k1SO zC*@bsTKD7|UoFy~*Pa{ghff{Ux`^E+2b9KlmDR*LbdCh_n)`BsTm5`aEt}+;z_qs< z)KRUA`L?MeUjBe)djm*0jr8n?tzhi!ZI=g^gl1=?4&uX4nO2H@sEu z!02+IOP$DIryCN(?o(_708T)&k3+}o2h7wnvk_-IA*0BPKvw_>JXy$~fMs!7izB!I zZpQucyQ6E{H@E|C%kMRU`@@nR#IfMfafj&?hpB#+xtD|m0-si+=!UFy z-B_}l+I&t}Au`@!ub)eb)A^sIF7G)acdMC~3E_Nv={bk+Nkd1gPVip-)=Ek6iy6!1 zbXu?#kbI?a$Bqn5FbqApg9L{TzoDavQ}!=cWRPL0YZIzL$C+ox9l?i;a$xvl6Y`6o zujM#f*-GDVh*;Qe^7`&xkTRtEV_LKUn+6{9?k`^m#iUkCzcgSKjMzDzh1nPg@(NmnbJKS7 zb<92kx87PFakVLyNx$N!tDgR+Kq4?AHzPLdR3673BNW&I9^AO)ihm1+2-bda*a{E{ zFaZ)$2iUFAtMm#d{HW~?MNys5_Z_6FO8FSi&+X^Wz6fc*4N!>d@STz^ERtd=mg`u^ zRGkrsg@BM^ID7U<0Z)WXG3xl)F|?xuv{T5hiz=d(v14 zT3i@+k`VoV>tVaZ)jo3`<57Hw$ShR=dCZC2nFUUH`PJGGHQj^eU 
zlQ*#q!B*dZJ~OWqo%DfT&f=68$ekhvSm58Oyd4I<%Fj{XH#NDTxD^Y3Fr^8)soC*S zmKjp0KF?ckc(Jt9cb$~1ks9$-0-`G2xr&a|k3BZ__Vd*9dK)L>aDro4-Si<^J2G*YMH%0_c@oZ zFWtDM(%n8os4xGXYs#!?>R}3yP6f;*N zI{d!stWzo`f6pHw2H*JXr725FotOr>W`Q7ZoVL}f-mM)v1NCrJc;PeCwpPBs=ML`8 zqF&OTJ)AhgdhwiCf4_7jt|KY*O|W@fLXZm3ESq&_q04^ZSwlSZ0a6MVcph`&R^?RS zf3{695BcIQzO)?v>j1zm;xwlw%p2dMgkRt>J<{>6EnR;XyVj%ku2bQ-Q=8lGl|P3q z$0#J9UmNEiAaYWpc!dk+-&F3_>=v-l%WQggyov!}7%6Y^XcL;amc4z1%uB~lf0yO+ z1`1@;McjE(lfNt|bQO`J?O*YrZuZ+g`e{X8mUFz6My~O;cuVWvHE0r>;VnW?xm5|K z$yN56jBd3zt{-}eLEGz>tE#e85BI;F=4J{3BTsQvOwELK!e+&5^c)|8(pf>xn%-J+ z-jCP3q+QowenX=DiujpNB?N*sISE`4M`lgcUeLZ6P1)-wfShPVvaev$wt$oo_rpDdxsFFW zeMkw4?bGk;uxLdipjnm}CqNN(rsUQMb2$ylrJ>1*DQI1s`PW%@R5K!u@ z5lyom=7m<}8}N7}jI1krG3c{_UQ=GrgC*%aHH;wUK7HSz&0(S5t#nb0blAJ?zRkI< zSU7UIj3cX_D>MfHh<;QxnAmuvbPzNA6;Wkw>Lg2#ASEJ1-;^+&v@f}t>ejE9G;347 zT&b(dr=BYu`yi4dGNk7u*(J_-KKOVii8k#h&S|s+J<-!*D*|7bN<6^@Xi~ZiIR2G zFQHMMiKm_1B?;;}E7`4EwHdm%6Q||W(Qv;Zd=IJ3f$#7rIl#iCk~3jA9O*w>I&t;J z##Lu?a3&*QZpJZJA!u)0afEzv=m=p~0@tjyLH`Kgm8M zWeTvmQA=7d|sAxuW3Yckh2ZYEa`s&R(h(Wm}#p%go!!B zMHZkH@iK^HB0wIDF~NI@585WcAHKJ<*P{k~)R0d-pZlquRY$?hhxzfdUKc{qAYELY zqz+Y_`moVEJoYR1S6zpR1lNw3G2Z6ot%H6&IUsm%(tB-TZ%{BSfw(!9;z2!{Kq?HkrvmxvR%R4fY1Q|Kg7s&44+R{% ztKr>zSyQe_I3WZ{y#^u~c#@M5?rpj4hI(!NjvHT_l1LOXD!m5XxPGt3y?H~ncPs%? 
zlT+61rQ_b{ev=a{$8N^Yy(@jqMQQgZ^tE%(DF~);4 z&Dwp(;s-?pUY>e0c9@!FXd$i`xEEiuU?LRvW4xN+L}*RwLzmdVMa(dDzx@*dCWF<@ z_g)4Mll8G}Pf@s%hnQarPoW>8X~c7cE&Pgducd0>U!Hu>rqTZKJyCA+lK)G=K%do` zJ}E#mq+5pvO;iR8p4+!8nEqU8rUxr7wbq;(TR<_Q#LeWt`S|b9D{{KPMC+(=NmdT= z57Km%2zuOVw3fA4yWRM}0p@-eVz`-?5~_pH1tzvneyCVEvYB#uNuQr?V%xJ>x$wQy zy=vjQ^}AddGeC&VdpXo;`@#ZUr#;P9;~?vtf<29!I9Ec>eJzR3BOE#psJ0Mhm#^7p z99!LSIsk7HZPZGI8X%=)4}4~JR-e))uMLz4$mAp@4+X56YphMhLj3LQjh9F2WV=C3 z+EZucX1eT`)Yr^*p;GOCuOSlt3ELPy|NZN~dvbq6@!vhUzk&Gw_?{e)3<3)crAE!S zMQ6%Mv!2re(7h;BA!=*XK;@lCz#*)JL^@_943G+TP=3OADI}SJ@Z&4*46rqvJt3+& zngtla4g}G$xYmjTDPRz4K=T}|q@Kepa36+>Pi$f|yLL*8f2c_TxTV8bjwQ5p6P4h> zYyEY@%pBopcG%8p2xEWPX2C}#1Zq^fM*&cO1kS48*{OkdBaf@CKBeXIl7tLDWh~de zXfErZeDF4z0idO-S?kvt?wCLYXLp6#1mscz+$2{aak{sI&~iwsQgs4xDS_B0oc*luUSmzQ4(2rj)Xb!OYi`kJ|us1 z&L(i%`;-w9>4u`>G_7TDf$b=u=H>{8EF>4Uwnn$+sn4gmGdISsY$=R{nFj9VLs%|2rv2tN~bZ<;{^bV4+r5_mcw%$5BGwW5Qvq$UBzX;n>MpVshPFM^u6?F!bA z5^CQW4@|rpoHp@NQ3_##suPnx{?+ywWBHwnwq_2k6_~zL>lN9me!@H`JXOuVF#dTr&$Dp(@AK;1WzAB!<6OR+8Ys z2+s+VQjky3hr>KdBMAaZ>s%73od9F? 
z+U=cQV`Jjx535j@6Ibb?D1yX3G07kQ8is(D!^JQP4y{X+EWq0cKaXQxb0A>i!79=X z{I!(o;vvXES^pOg>Vz>abPk+PnYa*ZJdoWjMFy~tsv{29yTE>vfFIxgXi&>RNR&X0 z0XhJHPa^D*5$OeMk`g?HUkPX+a(JIJU7#^N5*AHRz2&H z@kNJ$4$6IxFQnAg{Au({%6v)|0(>Ft4+;AgrFdXnN}Sk*N(Mb)^kBYdK~HFLVMX|R znzgJ17EI%GNkbYe1|lo|Z)A+7wZ2bH903!I`3?Kw(NZLa2?V$fB9lK3Us&)Nq4!_> za67cNQLv=X&6Ap62Ta^zQ4Lip6}XU$Fxk181j$an@h*yANM=3~c8d`x-|~x)p9OeS zleTmrKvN#2%!L{LXAAixHx%X7H-ttSMlJRiK>r`9pna+&0Q`?R`u}aT)QAg?qm#Fu z7i2I#$|FD`as>asbf&#R0vab6MKFQ0;jIRukkfgjj~1M8uIV5;cv^-Q$qtwhI&)S^ z`hR-3C6{!DzdQf7?3IIvruR*YF2EmaO;Mr~HwzkwuU;_1z!S9fi-IJr$ zvpN@FXySO15m;>rcgq>mbPf;ZYCLa6A&Hf_jy4GU&F{ zRV-F=$WywCO&W(Y+~ydQk#%KWU@+?LUPSL4bpJ8Em~Dw%I@<~2#js4XpPsA`*6e(d zaxl;D{=Jx4-vq+$8P@--3v;RHF;%j{-mD{Ps=p!dq?RsXKUlA&ENRnQm89R+ zJcTwr{$6X+6XTzjR8bi3Cd=%84!B51Iw16Tb~Xk-uxO^U;y(`ODe`e{=S}Sn>UB zJH>su1q;*<@fE4FcaaPh`!H#8xU5JVQM2UzF9~nk)ZVlm+zRHV)8S`9m(epPpk;rY z)kn-zIP8m|_fP&g`j{%!8eU3t$M;|{c$a+vsW#12dQK^$kpTO-1TZ>28R&(W3Pq0Y zU|%Yg9i_ph!;>~@?p2(Wu7`|kn%^dV=lsV;J?TI@`tUHw^F%K4G~%NLLQPnbau6a- zzHz*|o31w@7BTa+6stQCKh1XOy%EzBd8>23>52~4TjwRdTFw8QjI=nXWN9Un$I+ix zxNjAuw)#MLM@E;B3q2F-;_XJ@ACi%!R^y5$*b?eCQIOaJ&uJo|gA{w%9|?Ur0KqgQ z7PfG(`W<3_VBkZ?0VS(O{27^_XWbOE{nWtYtLIR0`G%l02^$6yXY+^G%PgB0WscXs zjgE9zlw9n@{dKKNp_C0P$b;6f5mM)KDeb`q_nNNG%331$Li9D$*lpnvo~@QDB`qG> zpy6VmMh(bE>YW~gXdkF#{e!ezl8Xn|r5CZg_?oEA553}Wy~Cf^!>(aGpRK}^_seY< zO3#5+C3-2Cg8UK{fqwUjzSL$1#k1<~HoO-9oGmSF3Zu5{<-%zPD~_NVd4$Iz&v7AU zoFgenl<>0Or(2HbE#nj;M`DLIT!rP2Nw>M4MwlOnA;mrwJ#h$_Qo)`cCt27jg?mY) zx!*?1KoU~e4=~HY>BFRGh=EoNBI0q(dcw5`>(ynij#O};`lX0}j2o|3%P_WbR&z4o zE4%nsCXEV~Sz)@K3m1gW_8vP5^ijWGYMxU&WxzNup0_X8+9o(3Fyfb>Ph9NN18Kh^ z&c_CiPiM!OTKYQeJf%xg<`&&~;B1_yh?(#yh2>?Qp5o96Wmv zBh&ZCzmmrgB!549FviWt)z+A3%8h4>?DlwGlsbQqC#aRnmyUmC5IMfn7$H8L$`#SJ zSK}#pttGt;d`NrMB{`p(*b7 z<0|A_RtqL=&L7udzuBP{<-xnMx?Gz7>R%HDODh{fVR@|HBoa)bm%7&VVL zJh&5*(C5mWqO^n7@um|Pd+(W$Brp*)3#w{>eCeyjo$cLvzVe1OMJr*W(K^;8^{^pT zBbbxM;AIP?gvG+VS!gJ>rg%!zef0{7rRfjc$|WUaLTg z(MB9Pg~6z-drcvnp@1&1TWbTBNK`qq;f~8p2v5<)O+D4)K5@P#aAGaBj 
zJjw-;7D|To((Sr?sobtIt&~pYnfuZt1(Na_U;nV?>?cp8@{f;45ca$=Bjhz*0)~@O z!py4;(i^-|e(t>MQsPC0(7E@yihZxDwnfP+$e(5t+l59iaofx~^Xf8VzM7slLTwi8 z@R#pm$8$Ur^w({N8+xk=q%dfK#YkAKPjOi( zHbn`&nH`<(S}bz%0w3;$T>YDyIr+=LFmH#PR6&*J)(@gvIi3+BT#h$rvOrjz7Lx-a zF6{Htq$rxvCs=9^mz}{)=15I1m2tQXC$-A(HFqOC!(EJrZw1BY_7l}{O}T46!%g&2 zz83r2`xX|_YXz{!MylvoS zMsSp}-k!3JRh}cuUkwFhS=ucdf~aQ7zt1d&*A26x0~2^^8oN>AD`T38><1Z#EQO}V z?;5SR=)br_M|EG&Y zc&v8%6`8Bk&bifnxpGRIJNK@d7X~F+P1;cY>oj=@PBYh@p@@Ed-x6Y|>1m>+KCK;W zO7;WC5&;Cm52gc|o!66>JiZ7n1HFgu8gW12KTP7*Ht(T5YW?U--bDM{7#?!ZJ3eP`G-swUEhRQ zc9P!)ebn*Mar`f+;fohbh+k@30q1Dkn1VSz71 z#yF{?txZ~1nS}p&8)1Tn5aMmUbxWyLtf3I>-HAv*VO7+)hmR25tFrna1FtkWf#r%c z@rUbC<7iHkO^tEQ1=kIc9+O+T*gX0hedYo7Fs8ehg1%26Me^g5wt2?Qu3ZlN(ymX$ zhfugwj(IgDb$bCZ%B$g9@nUJQumDRm@J$)zJ!TLUeKgjvD#?XuTlrth2Dys>9OG21 zRo<2IEBhhDHO6QKDOFgy84S`Nf-8IOjeC8Jj@9yd@UpDX7!@AW4`L?~_yBaCFQv+A z3ra>pW(JOyAe1r@PrxRo?>X-J6Y@!pUkUEC)0>cG&uhGCcNP{;yoUQlB{Q~#i@jIG zqEx=adG?aH6YU@vv<8GetBWoH|E|p8vyqmkX{4^T^oj>SlFqgCq~Q|GE9k-@k8KdP zW8lFj)JlwJeSxl2NHBT>h2N}INNvkKUsa;#J`p~k+}b|D?;+KpEU{NBGTe1Lpq`BqxPpCfXqqbb@m zVX41~AFLD3hz@jB-VVI2@|Gvgav;;(Y8;CeuI?y}@Umq1r2&ubRUl7Pn>Yw^9FiTx z>{0^=8_vRlqD4>No&Kn{F@a`gSWbf6rkG0kTg|<5TkF?L%UIzWMI{;Y(*0xX5g{lB z`&2S!n9ucD7g=?eqSr3&vhO{WmRyt&J1<|`O;&4XvPU=DrSH_zc=)KObSW|#`jDok zMZKf|XdMoeu*0_k*N9)Y9TvX3)faIU_L98q5Tdv&`#QPlCPoI9^N5T9cVEf*B1;hO ziFU?W32Fa9+%F~@8GvCDUGBxAi%ip`IT3GT7d}4hq1|j{;Npiz#{Mx^uLXYKAgrA@ zJvz*T-t-fx>=5QK=C;K|xXm+sjy@&&;ho*z>O(#NUES;0n2xb=a+B6;!)S6;80OGl z)<+GjML#2DRS~#_1Rjna%5livNw6P22;5UOC81eh2)giAajV1=`Hy&vM{~^{HJyE#j<;$W$07I~6#W$VGz&C5!rU%QP|K zUtXwh-wl-VL>R{vCuM7~V)ALA*HUWjaEKvt3F4Ro-N>CwJ8_pz6O$naOsgbOMvv~^ zV9^GPbuWJiTPjDL%eN=_olcGYE(9;6=UYA%3C!MXN+r-J@9j{$UAv44C^TcvJqRM6YYR>b| z91|F&@da5xb%r0_nB@a~5LcCp6dLQ&H`0Mf-EYUI1;hZ&X`59gpa&N)YimvK7`El7au8sL+(QF zcG=D5Zz~9e7^}Zrbt<@w01^s2gqD{;=&*ThlC}sQPob5A>aMOmdbgakpZtHiD zfvacVc2&ou68L#yF9Xm_+Waq8^v*V8P9k3&AMk`MpZt{&ThtIz4Cp>B2AVXdFObu^ z!GeYQw3*R(8y3%M4!f83u=#)*GCT?ps|&z5OwW2HrO?>$sx1qUg98!o+E&dIk}&It 
zOmfqrsyKhwjlC=X(qBXpqbJ8-!(Ol_wieH8qr#Tb}g;GE8zK|u0t88jcn zsm1PkgPEw~8wS*rVdXDoRQZkA{F*8I>qz)+qRWH^81|g~(hD1@*0qY&D6Kg6>dO|J zw~ZF>M(e?X>i9kyFKb4hn7`P`i*16fmdIpw1QjgXPPZH@f+~Eh(T& z_U^l~a3@&Bz1DIaE16gqcFA{bg(7rB?W~d3TKxz&c$+&pT(b^BR4Kh-_T_9dqQ9J; zBjWmiOwqH`Q6{yPZM2z2BtkPcf0Hx#Lh z1lm673CclL1LJrqt#I?-ZXoKl-kv7lwcdi<#Z`pM`6bg5HgF3Zt=>{*jg@7qp0Su~ zcQImq!LufLF{{Zn5D!yKa`aG)SC#JL#sCi7*m`;-SZCk2QN1UM(J za$s;=u^{BA`-<{pzYT(13nue)5GM}~d?CK4^Nb$9nWNS=Q&t*7s?}s|vn8B?pI3U| zB(9%1C=c=(=e)`@VptsChtZVUJil-ifH@q)-D%f8Awm${yogU2Vr%y=UwywE`>;)| zMSQh*8)! z(z!jkE0Nagp<$V;FOrm_x80U~yO8AMvAh1;^Wmj;{wnOzk$|Zn&x?5A6G|v8pT1kV zS|xKuzkLhJM|hdRCp)@rM=!A!a^!lO@ily}V>G%=x5FIJSpEj7j)?H zL^N`tYrvEO9tT@p>P~c_kkVFpk+R}d(+QYj7>m|SZ0f`Yt)Y1DOB>aF0m`&L@rmWA zMq>Z$(MpmpVk{IgZm>P=WrO}%O*Cj2pjxnEmFiP1(8%gnfWG=iLx5j!KLJX@9#rB# zyjCNZjiXxpBzqlbInU;W+nl9fGmBUUx*7LdEiadg^E{WKgpPJh zZWjCc2c8Ks_oMfR)sg4pn-opw+p2Kw`coD-bm=Zg;B}g_6nym(oc(scC+pd!oGvrk zedjpt7O#Fvp<;#=@_2hudMC&lxhyQ%@kG0mqjf7g&8tixhQDc~LzkYHXK2qZ+BN#vMF0xMEq%nvfi?lEvhYUwHE{88fEfjx_ z&}0zQQjDf%p=)mvGNbWilcZZMcQ1v|R%j+Ftw0+cEgF!hyz{9;#N{zy1kV>4aa_vD z2Tj)UM`PX~gCEM&AkItcP|>$i$KUxIm16r}7)l+yg&T&QxAsV`dBzgwxF3A!z6D+O z#Wy?U&A(#-nzEku-~m&47IaUz{HP+^a)Yy!=yP9nW6#fMQ-3YjR=@r|v5EGdEPFld zV5z-)Dz7YYrgR>fww>=)@=DDujw-r0);#VO_d9r>_TRK)RT_G3EB*^{UNtM;B{|~eRqd#u8WLVHl)r)4|_62uf}z6*>F-6)0IZVLI@l^b zcZo;W&1jv23J>F(JVDnUzk?sAa#$M#_nV9wQ*oOc0Gw)efijDYt5XDsKjX7pKB*^o%-vjAlDqhK!7STvnXn~Pw@ZJ59 zpQp*gG=@?e?NWg;GDP|jlk*If9N;DDSfH04pqPe7i`dtD26%9<0>1h(Qbb>-!BUC+ z5est%$HL`TEoWcwF`3eK!w;f{B^Gxgnx7*{gmWnho2|w(&QFZIK^5oqyBusZnUp)D zP59L>_$TJ9eQkT;96mkl>lGno6yG9Va`%eC@oVsRAST|P^Y=?+IP8-P!RAsg|Ks2e zE*3qK8RrVb%vC0U=YCH}nsEYp?7{-oNyMRbjuvMi%`$dnCWgXY8+Yl59Y9Km@pxm& z^$Pt>J70x9l4iPSi|5-3yquIOte4)&a7-w1NAx@5j7Kk*9~JRNw+q*Z&9eRYMr~2m z0Xe(T*GbA%nc?@vo>yAx<9H-KC&Sa7V*ha&qB(WfP%u&M{upw2r|{#UTtI&ghKX83 zIS0XzMU#osqCwo+u(_D1LtUWQt^AAi)aJTBLaLD$IvBD+c(V^9wqaaL( znyZAH64q2)Jm5hQTHZ@RU7824Akr0uA5A8zDkhmVw{T6?@APg1+u^)kK0nYOV_aXD 
z8+xr_BAAq~8Pn|QxH}F>#NhgZZG0>F;qHhYGFuP72UcZNn$^RXz#wiCLS-*h8*LV0JH&?M!p^=^h|{#V+fELYqO$#4bxw$-CdnN{ajhS`8FsC^be4f?U72xY9eNf%;Up290WA--%2UE}d=_XTzQf9<%=uLS zY0%_!_QAUXnK|#&&*6ETM%$Lz8&q?#9w6OMGH{S`k*K(x5*##J{5_Z=gI*YwXT5`@ zsh%L)T4OjIw;_v2eeYGFxr6&SRjMI%N=a?6Vg{<1w`!?dKAyOb%*k)#k(DZnjR6mM zjS;>rqjM8H95{NfnTN!v{SG>ix$RgeK~M?q&LB=VoklpClG)5)s}kTm=f3hy8ev>D zTYtle7Ws3)d$h9axIq_0m0P0i@d43uSWOq!9wfIvOW+e=k?@bdg$P|#6PXRBgx!Dy zVvjN`BTGSt%3-aa0@fC85m?h~+X*=aIXkk+v!8Rc<0jDS71Fq%x`~^|T2!IlZH@TA zB{yrj)_rGsl^6b(+p9rVx1n3%!ick93S*E%OMcmkl8{Q0rdo^s>T}7|E z2Y!eWmLxYNU4`;m$bzhGAFs*AO=y>J6x3TajL*xrB^8Hd!Yyw%Io=Ai3gm9}&~Ez< zp;A0>^cv2NI%oeKLP^L|Cp435L%$05qP(9i9cRA@Geb$NI9nR~NtC zRt>xV&R|~QHqnRMz9dZF-ZI<_aTocdNAvBQO!?C}ENM?m$mQ?fSnpj{xJSb~c;h}p z4Ar`a2pRK$@AE?hUh6a$3M)u2*z^;^H;Q3<>XF(zqGAe&(NR4_>E_wN9l+iD0Owkf zUs;5(9O0`X3iE}%Rm%HyUNVyoLX;?BladMB-J!6u{dG?>%~IJs9(mT*KxmZ3`UCGi zoSL)VCm=1{=3cG-OHg%!sF=gsg&m0>V;Vu6dN(22xNCeflkSiPe_5d$#qh6s1EMaE z4u;jqFb0Eov>sbvc1M6x@Mc8Uhy8XA)wW1ysQg4OsTFiB+CR&{9Q=-{kmm z(EX<1rlFt_XXw}Msrw)ULHQ}S+2?AQtexjWy2gV~_iu1gUf2}?@W+GSa1WU5lHO$* zC4MR^{zd4|Lb1qWk2*{DsRAntCyns+ESjY7t)%y(b5`;9Kr$j^G=Zclpr>U{?Gd}; z`Xo-Sj&H*TlKMj04!0*RBZ<8Od3mpm0)Cva;C0piH0dOc;q9!AKdzQP8dFfaD@M7} z<`AUs9F5P7CoZ1ArLwa>^Lm-SSE!VsnIJ^?e$!@+419Ux=+mZ*rqe3K46@lKm>9 zmE8s@(;sEl<$;xIM%k2lvyPA}6;d-H4-HR6A_PzOzwlLF!~U5 z-#tBhOf)H7eaGu`Y&w_^d4!beR8_2%)xTLOyTvjs&L_wQPZN2>hJ%E@Qac+qH(+zq zR1WX-xdYFFb{23t4qIP%a)QC~{)Za2l>fYIki<~`SlP8bZ{A7d2~z^Y0gGzNjyAJCM$0hq2;YZ#P+D_gTSlB5q>U@BLvMRaU{N z!}zS?G6H-4f`WOo2|0@@Gzs3*sXOT^R8evoy`|LH_~ZF(j#8pxBo|rAtTYN*9RED^ zGS8Kq4^V9D1IO5dP@6eC?6SxWceWy8;KH7}bkO`(k zRQnPI6}`{GKiWp+J0ud9hffG%33ba%&Umvp$^+BvFJ2tC+JBLATCfwQwyoT6aH+Bi z?!+t`N2m^y1{dk>C?cyi8?DryqnU){G!(rD@zDm@f|39g$-Jlt{RZ4uYYHLU?xR%4 z;9|v_t5}<0k*Yu6jSh7y`q{@i#xY20hE_<{Jxq9Tu})0L>z*G`fmyZwmXU=ZBe_>| z0gX-mmRVgH?)jQ}bHRewAKJruM5o7}ca1r(sS^kI8S(?~nX*k+8qJ=b#ry;}fz`Kn z6O{ZDohEAY9d1?5YnxU%4`)5s7b(;ED9m|7XL=KFKwCMwQuzi1_mqYI7cIMIj zs1+~!E00GGQ_^=0C!e&_!}JfEHns@g9r*Wx683!u9>@yhCel_{b5T`1EAf(^EnMtC 
zGtJKvOsLBb$?I?)sf2r56ugy)s-XT}+<+j3=D(o%Dw(Ry4q^`d zN?#OQ>)T8G{X4DYQ{;heWKBMwt@GrfmL6xtAqnMQF6b|)ZRs!6cgIanM-O&APNLH% z>aYr%eArzh5#IJ0@!q7MR<9o9e;8Qq?j$gI_%Sq{&mL)wKdS`|Ugh1LwsI@+3@I5# z2ML?axyZ*hBy1l{2WK?E@#NeB4lA+ldpe0#@{nZ=Zs-c)2NRv&u|ERdt4u#Wg6eZ>Ew>bt|) zZr}E`s#>wCHCjdWDcab3)GA$sQp6^<+Ph|=RimY7sl93wdv7s{+B5d3z4wSD!tbNc z`~AMZ_x=ON;W)^>u5q5{b=_hW3m&0@Cv^Z8zh8FV45K@&ak7ofH*km9r69F*SU7fy zW~avTX>grtx&0lpb+*uwbdphsrfh z`0KD@yMfIz(8$x|;AzbKWQ?WhG`9zH-PN<}kq?$u%_6pa6CpKn;RyYcp}%l?z=;A% z?6!Z1{@$sR;wiT&ldj&bnmHfNxF~FyImd4sB%!0X;V0c~?%7Fa*J;%l9hADH*Ef0e z4#$2LHsBJ2dh?}uK&ic&JNcvG#RjCiN^0x;v&5b@#@S}~P4J7Pnr^R$8<@|IvO964 z4q))xSzLO`=*b*>c~QRZeEK|T$Gtv9c_^q_rJfn?<6_sMaX;HyvK?%FR_|r0%qo^YfptsA6`aH%*tZ^2~n z*u}7Nn|qA!&c>whR#m@YD4YhJlJ&s0-r?)o`tPEub@813x<}hfb8LqS|6iv@;9_mQ zye2#5{~*bZ(!LFDd!TOC7R(ogd=#VgOd~ZfjZi!zdcQw~6vTu<%*@DyZ(FbyQrKgs zPcV}R{0{AjlO4_|)nT`P&J*#LiMR5JSHZH7#)^s#_XVT8*YO7mS<^@~7|Mly~p6Y9^9@k4Ntu3wBNdL1vm)|Yu{yJBpu3ywf%$16q zJ8fV`_Pj?7R5TU7=;}c`9p9FP!`iydX1R`lE`9A{^?({5zySi+hOQ-KN1H|a(5K09 zMoNaNS=5UBW8C-{b%yN?onRrYLJ&a++>|BfcW*pU(2CoBPEp z)8c`Et8Us9D3*#v4p9v=7K=*>WD!SmWEOtX$GnRL= zThezd8F=+zepT+`LIzf)Yne;k=j?5at;YNDt0-+V7VLoaghL7kvOuJKV@fB6`%C>r zqjw%(+FYg9Ej}Oo1dDBoC6}%Hs_T^WK{Fby)-_Su(0Qs2OMa_6`d+`Mn%N6N91H!# z?>{z79j{!N`NT-Oe)35A+tQT24z^@C_QyXPtFsQdyUVRXJPaEvoUn~aX!_U16)M3_ zI1*pj&wLVS7Y)z6mXSsJyt^J_u(3+i z3N)yjgyj^no*9nVa<181j2Wv;!Hz~Q*<~j^YAEgZ_5R{|`!L~NJR8!9+UQMyVj;KqF%%-_FloyOTWtl6Ntk|3dvm8!X_qI#hrSY;+Hg!oC>6=(;j0Dwy_HIh>+P;7XDtHo42&6raeU5ov=lLcvqUoxYf9H3OH*DyQHcd1uFy znmceF1!9+q2l4w52u94APA=LSYbaAhK@icYXiS_<-buwMSH+)syQ9|o(O8GNhQ^-b zd0zN4=f?5p>KXhEyzpbW^uBqRUWugfXrNVNVBjH}vD%mg=WIGo|9?**;5;c?0%WV8 z-2@yaH&8VyIOHz$W?v3<0s zdsT?hemjCZ64m7MvD9gHtQU*jj0uB?g!XuJMAx|@?Z??tafaF0f)%iyg!YB{wvJsz zL_2Y4#_`RBmv7;7;MGU|FY>lwxQa=XBI9Dmyw7B>GB4oYhx`AdL=J9G?kxaWB69VmuR{7 z^Qa}EjjeSN&MFJ&PIM>_ci317D(kae&2#5fvOxDbb5EP*vhkq6dtM4J+Un=5MF-JW zIdm=Dvof3OtK0gG1umfG&HLWjF_I^=)Q8rAjf8Rz^PLfYStS0{s5RnuSi&mrm|W0A z{pXPwot*(Z*qH%ryaJD!q!~P9N$!$L}x|wkocEOTpJWylr-L 
zbGbbAV+D1*;#JHE&QPOtzo_M~T2)hRH|?YGQ&INv!Gx~&o@>2H&h$qg0>hsC-;p%#2@GB zHn-j_k_wxcWuFaCrJBH(FQk9t{67KM72pg}`Rhj)JhapdDzGKT3STRDZj#f_3(E&} z+A&N$+7-V@4{dG{UQJ;FQwAcPX(FvOH*VEaFp8p)(>0E1^T3a?Oz&1rY$Py@c7JlVh9v~x zf*q7%9r8F>@7vq3_DcTrm#~7R?%u#HTLOU(EXzkN2yd9W(}sd{rpk~GL(x%D~%%&0TxO@xoZ8v4t^Y7JaA@x8P z(frwh+iX2npolOV`~74!4vTl&8FPK=zVq17-F8m1er{e`E2{jzIZ@!1C=fvLDj+2v zSeiWNnJ6N&isRCvo6QmSSBB0yOq33WL8wuLEvl|s@11VBcTVfe15)THw~2_~_}x19 zy6sHX@hF(#P%IT4;K}OY$E1ug)`;B=!LAeJqKQW@i~S=VM(uz-A2<1ZTCx}1PwZN$ z12g^V`q|OPCTfC|L6m{I!W|2!{!iaFB+mHRr|+=q&`jkFr0{I7c9_l7rqDxg3e)fS zoR|4#pCT|(;gN^4ZZA*!NYp~GDpw;#vaZ0Zkc|vc3rb`!0yn;+Uqu=uWTdvKmJ3qp zvUenijUu}^0Q_kYe2d4hUL%_Og4=V%L8_2&sD0PqhQZ!(jDuG+;TaC9NBS% z8-)6z7rT3(!|0R+T+Cs7(>USLV%K3&Z%Y8+^%H+I7e>#qX-0L++1aOq%4=LkKLtR{ zmFvQJ@^kRi!yilH_+ED6I?}*WlNmIorId~Z87B01q3`=q1=rcG9U|$O?kRWlZuI2T zYAukAbN$>v*^qN0EgFx|4zG8x{n#0XKekU#e`a?obg-z`yStnuQ~78x`|KIJsc7Cd zL+@$M>A=;kHX4H=Y}t`ppS~MEvH3pf=3g^P$}S4Xv*DUjHzPL_K#V-P<=QV-N-_lgbI};MxI)N}WN%$l>S}I#Zh%$! zr`9oEZJ-f?R*&!NyZyu1ebP+~gh@^cOIj=h(vpWp zZoj^8-2aDB)GmUTuy|a0nIF5H(mQxV_7U*9B+{uO`!{1UM~-&(?sMBt+bJFNn9kg%L?(pbyf#`Vd#Ueidca6t(4Ux$7 zw6eRhAi>-lUs2(b{hzfWIuZnYjwnsT|_sFHs*kVR3>??sd2F{!ySzAnw#$VmQo9vb00wJnop;Fo7huYv?KYZkSWf_Crn zYJ0q=<@!o*6Y(>)P}$P_SFCQ~z_tVY-9Y=Z0otoG?gdDd0z=*eMkGi4k;_EM2AbsI z;Xux722wZfO1k`VwQ$o(A?xh+m0(WP!}L^Gh6QJ2-&S@T%k@@-eD0I@p`T?E+8-gAWIDI zd19+PI@eUW1h)#CVI3#iQ^7yyEA90DYpZ{<0Bzc8pZxedoSG|OOY3JcYGFRBbmL1L zHB00pF*-R{dxZ()z2u@bEe*c2KTmBSx9c!@KstwNfWuJ(Dwv&0mh5>sdh%)a6b@Xn zW;W)T7-MCv(LAy}^V=)nj`8Am%VNl`i)h^?hn!W)RB5=$y|omzRw|4Cu#oo$`yhK= zr;;xecNbPN5TuZ2b8@BgfSM6Qg^`2nx9}H!iGBoSZ%vl4)gjSj0VC;VH8$7hdGYXx zYA2=9%GFI63bR`ZfMeYSRTmoU3oA~p6A@{0sTxuAa0dfMiQgN2-g}mXq38boA|&P{>)|oi9xX*o`TMTuUQ-Rh z|3)qFzrZEy`>bd;Vd%H6wJtl>CsEMDyp~Um{HP53oUUx}Oq4k_PzBhCul_JUu}Dfz zpy{+$m>~rKiggqS9G{_@B836Tz<^{ylIn@%6nP(0o}I zTZS9SuN}xI2NC}lk^E(iWGT!x7X@EZ6<%2Kh1_xo$ZGLyt1>ItfJ`hbQ$Rrm1G|?< zPrwyXh1>M(QcFLxY3+c(*#71e41UzT3?!Qhed+k^XxRB_wlw3U8|2AFe>egOrz+$j 
z_7j=q#hsUI-1DInY)IA^?0ZdKd6nqHyr6(XG8bU7>3H-d%TrsX0k^LsQJdz0vYVot zsQ3K(jrBj5Y7GP!W*&k06zSLW!~yeA6NcVbn6FfNSaaXdLfh=CuA}X=?$Be=s$B$r zi`&=b+}abrao9n@5S_}SXAASj^hciWR_)HFs+b&hd1f_Sz+dPoH_Or6k3gwHzvl<1 zm|YxQkx!q+FZi-=zdFuZ4v`4XXl`H*?2q`9p5v)DmfITWNz%dfMwiKY*okjndJqHo zD%`$lFqdkbI<v?=&tpTm?! zP7dY^F!oan6ML=K&&?1$`w1wgE315e0K>Le>f5*OaL-VuS8%s~!2UK~Jmasrvja== zHI^@Sw8AIu{vaTO15#5WNY;Ll?wg45JRf!aIgm4ux$)7SmtwZ<#GCc{l9E6X?hJul z1*h;jAp`bthAF{p+d9|Fa0^0uI)FL%+nj3ljlfw z{3^NlGrKE@_e>86R$I-5TTZ;`gVq>A} zZT+N)!qteBIGZx(Gfq`2G`jYRE_Yvez!xJrio|h_p64zIElQqf$-Yndm71CZDKjKD z3{;OG7vuK$yfJz`LPSSc!tS6sWVv}u+VMmS%6sTNmy=o0kx(a@b2ph32<$%f1(4D6 z>Z!^jRrEs?uSRK(f`mLT?C6e?lWTr!bv*bIyMC zD{C^}IHld)-lh%&Db9#LO&a4)z2A{TcGtpKhe>(_%GCNQq1Stq67V`oAUduend#l; zW@gDoCiaPRb!_SM+|j)WB3=LWJw^kkC|d~>!CQU@^YrCp$`zuzJ^~iom-N)k8;|)~ zWjEJFCbo7DI59D7td9*Q3(3^mkUmxeeAhQ0-OLIgxT+Qm>o&3eeiu^whmF2LMfyCq zpKj`n_(=qDD_K<+!s!CcJ!?buG zBZ1t$9CP8`U=ghaI z0J5%c5i*m{9!(p}#H|EeoP%m%Y4L*N@zR_O8u7sdYY1Iw7*~3G#PO}c~n8YJ! zO7`TBDvo*ZfxPX|tAsPbf;aTIlz`orJ+B#pgdTHrdK+L1H;>$=a3?;qCVU!5o{k4u zly^5iB%zqjB=z@eiteDv+}fG|C_YbnLBf4u(-0c)+-^vQsr?6adk^yJ3MNuwmb+vN zadh8}Oo`oP?#>Uisb&&yu2ODU4SOJqcOnZAmJBZ;X={z{12a#7fjuF3(Jz(uGY|iD zID{?&+c02pu~Mu=YgaJd9l$vQBX&Kyqm{4r+yRbCm3eAehGK#ldgJ*cY=s-mWTcSL z;~{Ny5x>v7j@>akz^tAe3sOovC`jV~ikYLAA@U!f37xsFfAU{Pqo2~WSiIaAOV}MA zz%1!2_P1C{XOG=E*}5ZnFCI$Lh!7xhEV=-Y6Dl&>-z*=wom!RN|bj|7Jx5N9+4 z=S_~G62IOJ$y(jguh49rS#0s1|tM=(e z`i1NpiF1a#29@Pf4piNMf7F--d2Xg8s&WP8<7swpyX`VW_Cgv2%wyHvm~hhM&$Qg~ zDTxA4;#;zDh%w9u$_T~#A!3pg{lGJ3Yn&j!A_DDb%9fO=s`plcX~T_k|K-I7=%-;~ zY{15s^^=fa+Ci97NBL!x{`gvLDPJ^|S=6(FV6q=k&C*pWoY(4nLe_UA-o=haf%Fdl z#F7!$_Ftx0ZkP`m=^h!nOH>oBe(_szTpBwX=u*mrdj z%1|TSgT1X_-lxk+D8a)l&3yFB5PVTb$b>%5(h!+H=k~ z$#ryU1{mgA$ET=8ZZidZy`qp*ROXfa$nfqbJBj-BywzOJ82e$<-F1Y2BeckBV^E8R z{3-k4>Iv%1MJs6U&!r>0b^mB=158)gyVA!UvxU5eYix7Lu6c2>fzOCh7(F!PX((8E z3V(dPly4hsZpWgr6Rz%43xukc0_E=`isH-YqQ{-FQGxXQ4}m?xp2<6J^P2hDQBldapA_gv zb|N7DO6?g^ZW%ED76b8DWSWWonBLJTj_ezj0J-ecgA2ijE-rWbLg=`(AH^D61_`vB 
z%mzLGO3q|?_r2wb27Kvwbssjl7IO&YZQ(sOm^jkYB{CoRi(LjD+PEE-8tGBQ-M zEWKY{o@PuQMjI6R<0HUqW;%Di*o_SL-6(4_bnuzs-?mF%_SDr}=&EAB`2}lZBN5)) zm(DqrTW>zjVF)9?rAQ|Yx>CjUO;e*-&}zJAG;Euh=^EfCp1;!=Pzg7kp6Y*r6*t6x zf}&dnGa(@&%=#A)?A{Aspc~D3Sq9aM;=Qvnb8|de>mo#oAKVmAt~UBblFLd1w7e>G zZM`)uqTq9WOid-LS=`r1SfO}lvL;4Zn_1jW5No-c1sr=NKPtMguhbTs^!7Z{iMHc= z7a9KrD~?)P0)XDwKj(U2#j0k9u|##i(^vy9+DWs{yazpb45GO0yFa<{e|T3$odmIi z(k^}N^b1DosY9iez=Vl`#Hy)97S7dVcp#Z=#gv`G?|*PeZ`PE@NGjx8BKN0|5dMkE z%lg)%vlWWDsJ}7I_;{8s)wJ}k0o>v(e26`JAnKtq&2928DC*OrLz-QML0aMC7j5g2 z^(_|nBtp?t!(>s`wz`@=6^ePJvIkfVG%ftKK*OyoL`__*+T*Aj|CZQ)Fia2)KfOY9 zlyEEhMZ1s_=@o+|&VB3!Xg5#Q;2wMaf|1A^`!YWNbIO+q?m=TNh01I~&MRWK@qFBa08(B zJyJ2E0f$p5A0>KUH=?ylUFxo)+l;O68wGv}(=;B^PTJ0f^`~KjT}L(JcoWRXyJNJS zife3+2a->NKe1nbU zjP2%y@O6;eH>G1?8C=5&dv`a*}U@{vlQgqMKlPvrAEQuV#rnT7g{?fV%T9B@n=cU(25*SjgL)% zVMtjU@~`(Illn&}b4aK{fY#vT*O7;LO{tx$hT%cuM~%@8pDC0%IH22?C@l9i5GmDv z>-LF?IWMW4Oz;?1HX8Xr%RU(YC{>U}Q?M4Nr+G!SOVxnf#B} zg$w~#F7Q^KtG zmce@!uCG-~`BK*EqZEh(O70jRygdkZADT{{<)$iR1KM0SI*3&{Jne2|vtwh-(7d4n zgAN5+3?Y6E=+M=dG2OHAJx^L?W}C@a73G{=tNRvecM?$BNL_r}s%M04^_Id;QfUp? 
zB(^G5J|O})b>yeQbC4mcIXQ z0*?_>vsuaWu*nBgH7-Gt2MHH6M8Gf9JBk418H4oYk8MRaJ1}b9xyYpyrne++5qMtyxSZxI5N`Ed_Q9 z73)gIV(l7GE}JJ%^w@aA!MVVYqdgJP04F4d=O1mcYt#36m<)LS8kNg0xBrT#XbSU| zXeZ=U`!k^HuByFjzvAsxIr@~yoy{lm!CSQd7;0{(%UhcaAvX2(#iAxvWr8FOcMI!w zT5VV#SVj^VKYkx;ao>h;0*l|eUrRMZ*AnfO@LIr^)#F{6)7vYkQm>R>KYym=`8;G3 z`F8E)3ldyDv$}#BTv}iEIo?#f}ArexuRE>EEK;>2&@f z77flDodEw>A+}&Wf^O;UB>9t}YqmYU)PsfTIoXlcZVx+4n^JX?Ul%Z#I5(@247U&o zlXh5tRJuUQ1xfsP=lz{asqRO8`!^#v%7^xkDs8d0w6+|Dtm(^$JrlL0w;BABcSAt; z_C4D?OniT!K(=b;13}uYK1lg8xrB|`64nRc3a6qz6FapdKEqe@)J%gP`tH+F5|x@r)Np$K0CCV`t~rNR65~m9L{hbqO)nk z9WD&z8O+(s4~4)W_wFeXN3hpimwMkhtqZ-PHiW-LMC3a6yWW#4_35wa>00rBHOIgl z-U+~nT;FPD@?OhFb~!g{82?#N&(dWd8x^oHcaw|sFFW`aWf)eO>z5+)k{7iZ)6Lzl zQ=ymCjmabbwoHgZwj@L6i?o^EVmd7dc4P^{m{Ay{wXOOvEOwINI07~4YTLD`Ja#2| z0974h-LcD~{PEXO?ZE@VSXwO6&LU8}ntx%PXV3`+=_zz$W~x3xlIMGqey+7hc%ei<*i` z!c9~|o#d)c&}&}8lBG>Z%JfH-8Pw@|R9@o ?nsIV%)|6`(8K>fU%*qQY+tzQ6ksKoqd_gF+o)Sl~);thEZyNfb1zQ86ebR$*9 z`mXIzB*Vc_)Q!W#Q`t0|swjV2`0qk>qd_xc2`Cv~V%kOwI{i-AH$#%a1s@>{8|=FH z{d7)JMa=>gjMUA?(Q`H71<@3X866d3F#OX5Cc(kjaY{xLF;$XR34VF=o7RSmJNSX- z0Uv#&UdKNLW~?KcaPoUz0H9d!*-be|1`O z4AR~wWt2NsGKqjd>!5vaq8oPL<2hx;p-|-d)TjL{E6@(8yKZpWrsuJrMjXVgZmT`$ z3+Q?yb`s^>ViW$j#eA|?=Il#;%_&(8@Xm`l`>Bi6p*!Ud`6(H3_vELevm7x995Oio zuP?Uczi9a6Rl~uH&f~h`#VQBfvwmFZ%30ac2Lgs?cJ6eBCkw5~ zP;ZMfxQm_l^z`8{B*yTFdBO$LQI#^$Jq54UvGBXV^dj90;!1p}lmbvN=Z1RU54k`Y zpQ}2v2*l3|Eh_!m(x*T~Fn3&oX4bj+*M^YQM=_QiYqGTEs8x-W=K<47h97yU_7zH{ zPGsOr{KuPWG2hCys0yL0MeRA@y4VUcjmLvWe7bAIsT8oy^AS1ck^om~4b=g->Col@ z?Nvf4ntd|_oq07>VM-Yc+c+GnHQ+ADznUmLpaW$m&y|O#XsqtfkAA;(yLP^?AxB`M zbfQtjs(Y;IXzHkpRVp4zM5M|SRBzUqV`T66Y-k(cL;Fss{@SJ{+r6kEiK3sFCN z3Vf|Mctp-;=6ljV(Gfi-xzXf>$DJL#37le;5y7EeiI12tj?9FTc^u>FGw7o`w5L6& z{(f`Y`k3bn&i!7qwr6i#s|Bd??xF2(M2KMC-jj<3D8&_S*twoxO)EjdXMM<=EBs&R z)?y}9bcTyw-^cS*61^7#+FCP8&8vRj>n7Tjl5Smc@JH&uQ}#8koCTg87J(&_|QYj zI)mVOATkU+yDut&@bO+B3hDb!eB!Hix&@w@8Db_DWhpGpNCAmnx8Di#9e(OA{%Cba zSSxD(^d#1uOt4SLy-%J>Ts5r7FF=r$%CxpA*PGISwqYT0r_m}rfu-(Y;@IqH0hu?? 
z*4m`+Ac&G19NwpqU|*O#!CW^{&FivLBY9Af9n^16IW0(h#Cgg2tP4BA*NQxNt(p`Y zR9v+96Y1qYMO?elt^R8{h#-E}Sh3-kxc;V!)V3-x?Xhx}y|$#iksIt&8WpM*|YThB2v)v7}t zYH^G-{FX~$m+~Eoaosv{Zby7ya6M956s9LI?sp5W!b^=Q7wt?UJfZaQ`awd=yvjIM z7}-4lQs>fYv%h36?2SGDCw-%0o)8l$uy&T%TJX=2*^0^UDOUJmb(2^r9I=P~ z4>BC;rpH#B^=YN_2&dGs6ojGPiK9t7^7!>l(Aalq8fQN%YYsgSQao+66};9fH}^av z2)gYvUbgXOEhe_SXkPbNdYN%19v7Rj;iJ>(7`kpF@kTRgPAg5xoNHrP50gk5fy>nBNbeY~+Gsx0Mmv_OA9K5=ttcy5e=>u9b>w}Jg8HB&C~Zr>gH zj059W^w+yT1td@H$h+HL7}T8a=SvVW_(TMmes28=ARLu8((Ai=F4hqAJ$N;vxK`5^ z!@bQtcKxzFi&g6Y*R|Zm`)hUOg>|P5QETWWCKGNuu6?~?I_Qy5mYgfNhgQzO*mF^P zsxK?lpC~n4FMv9xH(I3<=aWj8nn6pO_mW_7yMI&bmvZp%ghwB;PGO*DW5X3nAMqfL ziev);g&*ob<}6MXZJn86Px#AI`yCe-VAb|5 znQkG=K`4ajsiZn&(yKcrl)am`sOB`-34_w$CIMb>ONQQ;xvk%{-v(Xl2|yQaw?uR+ zPpdT}ly>#KcdH>%b>9;8PR3lici#l)^_{p=c^jOrj~+C|OQ+!Da?LA_)#|I19^OE9 zxMj)56_%PIl7=a=YrXj#T-ftmuey+QKO{CNcWRBz*KXtjsYe~KIUK8Xn9KGA>ym5v zRQLIBI5gV-9QmXPZQ}Cb#g47hba}-YUo-;pUMAt%v{a$Mx(LPhn#jy}_yyZdp?fd` z6@vuI(kPG=WXQ0l0a2Ji13R7ptT6Y?%E}l)u1@-F{krMUd~hpuTKYOYSk|i4R3pzi zd7&Xq<=r9v!9-D`S%p7dXglHU{OFl<0SSj2wW{j})$B(J$OlMxf#Dl*sdm^Dllu%n zXj{67cs0`OG;SU?r zg$3@XQ};i-@8&>LRSy4VN*|uhgU6KF9xt~bc+6Ia)p(3Vq8>yRQLjylzp1D?N}SQd z9mSyz%lBpp(ouDl`_(pX1g9~;W7F2!WZk_s(k)OYUHxHo&>&}E{CSWR3U}=&iHv+_d!?cg&Hi_Al9)rsSrgk}~syo@cP^of4H$dVh?XIh1-nb|z3ESl; z0!nQ=K{iA;qrp_t?K3}5_G+w@b9hX{^K?R1oWBP5zv(Mo}7lH3H=gTG=PNXaCY&zx!KkYpxB68_nTXVi^!J~27 zez70aw&}{vXrOitD6kPtlkuFdTjtJA;Q;7v&b!y`J|9i^N0HX{$vizv8 zle`5h@2peYP)F_+-*lr3>-JMn95e5gy%F7wBd9~@11YAB!)~%{2+R`m$u5U%e1KHS z-O1>mEy=cQjjUp;nRyJpTq@9-n^h43%qKS=soZ$J!uxp=CQ<`+N(9-ifN zIU}sbmmoh+xS~!@UrQZ*5l3F7Uu;N*9qKNOh|6gG?ygjNBZN|n8CN%~x1b42$xswsM~HZ2du|L^X2AlAT| z^oM!Axqok}(dkxAhy8p5zO1Fw!w62!8r@CYf3_P`9qd(@mIC7$uUQ%vWN933ZtJWh ze3*2Y5GtG@0fi%@*%}W*U@0dDUFw-JIqyF}4uF1#uask>cT0Val#v@Zf!(3Qc}q{5ep5%+ZjxS{3PY(~?)?1hg2bd1V?Z@hyH@ z-oys-HoU6*SOXWSfjYW5bk_K8T;`(iP+-;xCc1JN>5{vkTUO6!o7>cWH!S!C@EDzj zxkwA_K_1n4x6Cl=RoMeD128@ys<=S0%7K(I;@vU4Nv?baw^2flH6uIr3VXdIrF~Kt 
zx1A5Ed>;xbleRFA2mDzm4BVXdggXuy%>B*yvOYzr$H|PrWbD0?S7=cF$C>n~Y=&3# z%=ITAdU`Z3!DXp(6W`5Lwy`BFRq*Df(fiA6VXLd(7YanxWy^6WZ`FOzQZy-CB=3nU z`bb=K&`JWzkhmvV@#m+~<88~;rJql=^3V~v6*0BgAem^J#uJqxPJ0AwJb6xj>ToJ| ze6IRRqjS*YIkvRP^N)REiyx9HF5*qpRhQc{-pg-#@otiuNmFLV-I{qm_ok0$hc5d+ z8u2i!khx`_JGr#AxM9xZ8jrVqBYg?yc7WNZ`3+QxCg#7p2t54#y7oa69|`$P1Mnc= z-}NH*L);9)cG=hb4JtK@;*JEm8uos_G^ladSgi8FzPq7b`yd{aOM#TShdl131=OKm z4}<3pk046!y6n>dl>LN193l9-P1Bb}|5S|W#?O?u~HldK0y4-W>DR)6&qe*Yec{++xN zF`0xDL=)a;dRf%yyXv%@Ph((!3I6vm#Gp>aLm_uy@Lr08(JJ4c9&Ym}1FVxTtHo+x z(m}$j_Inr3U#S=;+ICMpdmk z(wSWl4LL_qmHU=dmjb3%aTV49qJDGd&Mv+{I+mrT^EMymRfR}a^?VCo z#ke^riLEXnS+Zz%yGGrn?7ymD%iL*;8t7IrXKD3Vvu!m;^^I;W74~oI=L24iVKF}nPJHEo9xha2_TQ+NHJiOr~h?eoSzOTq!aS) zh)zMtW0cb;7jJej-6~wa%*6|9cAg6SW?oAVj%aZ0N(iIq)_rQ{cl!F&*RK_?{uVd}-Dj%n`sAnVgRb@{w<33$FiXh~MP4(` z&h&75%{5vk$T~hcZS4X}I2q1HU;J zDur^e2U_OIWS_FINysA6V4|>eO@6*S>hWCNOsm*`8ValoP4t`)@tl-z9DN!aaF*f) z?3D)0k8sk{#l}j7Se#5}sOWJ&(9ML;Hk#}1?A$v&k=o}kUk#%?8=P!i-qsW|0>^He z3X!Hw#!f{;DNoW$0-}H0@UPK162@3UW`XrGs+P=~9_v-~kElf8`!6{*Vd&SRT54h_kUy%TuVe8O6^!Z>lUs0KR{{)^r;_ z-;kj0PFVK%s>1sTzB=h`Ug!h`NzwZy_2^f_0l)vTO{CY>vV>K+kOs4uonJ}9Az zB0Drn8(D1}ZG3&dKDSU_iez!7sNyi(rQX}kbQP>RTXT}!c=;Sl;77dMA3u^1Gi@N} zw6wL|@b!j+^&JODZX|$dDZQE;=UduJJr865GH;hg4fh=s%LHxk-H5~_kMQsG@6{Xn zck6$!X>C_#9a)pp#mMY2#oYuxAS~x$)#^@o!U%uu#&n?u!u5#Z%`e{E&E8)EBQKun z@C}jN+pV@ald*KY@|?+My11kc3pB$;*=po=*eK0U5<*3%Nl`8jnu5q1IKxZeO zLvMY6c6@(q^Qup6_3!q(WEt4=)8d~XyV^{j4i{NES-0l?nnx&17ENKjptoi;FHEtT zn!)+<`ES5ygy~ej`||T?ps>+e^hu@6r4IsYS%+$JOgNvI*QfVF+X|0sp$j#7KTHAH|SqbKaop~=;ApYT&{k9YP6pW`* z@Td7;0(q_^7?@-1Q6Dpgqyw>(Rox@YHM}C#4H!B4_%%BTnM`mTUMHpE!hlY%DUwBt05fjdn83#4SZ9C*|M_-(WhSi8TjKX`X4S=-j0PWBH5Aw z48CGHVMd_WrVmCdUFV`doE$P;0jn0PNA1_iFu2=un6j_9Bd6h>I}_PJuSA|k$(w_Q zQ;qg4Ev`j7jqhq<pt;B6|zQleE2a%bj<-%X7xX z*N+UB?6cw-X{dI84WQoso-4|`(QMxhWE}v_!-MXZn`349gao$`x+qpfn}V(B!H5Yy z+biWbe0yU; z-9A2{r+k|F|494Gs3`yMdmIK7Q9x0U5+xK+=@OJ01qEpYhVGQ^PDyD5=@^inp^=Uu zq+^ioksP{fh~a;YzQ4cEx*y-yBiDk3uRiDOv(G-zvsJ`O0;(xj^T$-WZS+Q3eQX91 
zt4<33Bb^ILn9p3}g+ktA(Y}l2wg?6HgSaqJks;CiO~kit$F=5KE)C_cw(c%hbE^ps z;}gEt6wPVGl(E8pM(9{dMy>!%9?k(JJ!dMNd*gLBYVYWDu#S3Fom)D0rMu)b@au9X zY`>%(U;Fs*&rV`?l$(K@?&Vp7=R$JGWA&Rh$4yJ;5RedBcz$?AKWi5$i-h$}0}KMP5ikkf)ier=4oNF9 zX>*#vlWLQP-gfcJUa#P3L=^mB>&Ge_+8b^DiMp2=h8$j*3L$fg%>adcC@~{j@f^(48hFt1hw5+O(2|BtvQn#X`t~@k@Fq4Y%$*$fQsLC|G?`sgfAS&y z5pKJ%+hlZ@-3CAgc#54G&YCR;m+P{9zN>cB7=9l|?OB*%p)%#i)SWsk{}cbCE@ZS@d86O<%bEIdRg=y7DE1~e|A;(%ku0`O$<;mp6v)a!vd zM@`{l{dp_LFKq&tXi(Y_ro>?(+t<>)VjDw18=I|n%Ab5kP5S_&Nwvp0i2gczvLw_c zV$(>!;D-Clr^0#Dbc(O~ou5HV=^bt`Kx!DV|Nlcnp~G%zA~1=?NA7jAB*;`?3ViJ| z0(w}=(a4u>;a_JMIxEs^;*gF_%DK)umpTP#F(3Oq-_=p+*V1Kww>ju~eS<_n@R!>* zC4f$IitZdy7eZ0fEEjy5N3Ey%bajLQs1i|UiCx^9h*C&&;ot{>{-aBmc5K3_F3tA1 zU-r1!-pFVD!|uzBO2E9xxBi-StFw1J4QvpoUzoT)j>+T&h9)Wvlo0?GHjlY|aabwq znPT!u*|v-~tDo>FkIx;3YreG`TYugwV0%_`C)yeJSGMlH7>0P2c+}mt-Io6sa`K`X&82OkfaIR9~a81tcRvvAfYHHtx zDOdgKv0D(NTyy9nN2owN-LFQRjW5o1oEv4#%6=?5ts3Put(A$JdHFOL)Gy3rLrn_Q ziglmi1mF1YLfx2+tvINU}d+XJNCr5M8fXiKA@R(!K72nH}3IMw`tyx*JYk zRTl$|IsJMrI<5i{rhN)N^{Vxe=yZ{$B)~iISUFqL)d(C6wy#?Zt+jA3anOPj?B*>6 zE=9d4+3w}ke%{^-zT>>mW{@GN&jA#nU{~0&r4dbJJmdsbKPyGT!N5J?<>9X`1jc>j zxwHF(ZSJ6$1|t{s=71@(k48ZP2#=A%Jj~IWpGc@E`hGMno~}5m9q-!m)NcgM)C_2= zlL1AUe=e?3{O*I5_Z}S016xG7&cEt|58LE@*CGDvq%&NZ=M!4aFwHWNHQTnKo1Pr?~Q(|Ul5Uw z-R=sfyN&h5*u*UT-;gSmKgv4c)MqX3KyCMOq~s=XVkE~mAAa9?;VFPHqtC)|(Vw{7 zKO#?hG+*O!Ulce12ta!suVu*IcmH6`LIVu+oY#Bxye$Yg_>jlw>5&x-!K!JMfZDoE z`-fCxb?eD8bNT*KNLSrnzkG<6yX3X!y*1bWvo=WrzGWB`(nrBV%sN#I56ugNEJp#@ zkbmBOP|_HCURwd^cZ-mcYqr8Ul})GO^%PJH<12Z(IzZFN0wfLltFZ?YKz~)CGslx) zy*gc=eaL3+OcHYHHw=IhjyikH(k5!XO9HpGsVq4NDuTT;P`#IfV|H6AbzJr%@I!#L zFz?5H3vi*adz*AaLSnUte##Z%QM(5Oo>TTOm#6-;MfqaK19#EG^8UKlh6~jmd#&z1 zet{5PHt+DXEVi1f*Kw)V97e1T&=q;!0f3wRDAy|m^fWlJH;laR<{S#o7m%iMSSMet zsqqkj16+Xy055rflSHIF^xYR_5CbTjVE2i^EaIE{;jK+y%qw<*%WNR>z3K*YJAoQD zT5g=gzgzT&ni{{if`riH;;adjw51S%os$IlxR>W_CfkaHGC7&4m#gfs5g?L~JL@6;Da-`-{)LCRmTy^086Sa+ z&bEOG<+TT8WA-KiGY3f5BvB(TKsCK|&8y4Jw==lmyrmG}Edjn&QcU3AC_XqX%fUdQ 
zcy_zoVI0_d-VDq`4Oax`aueNoj_}z~sc}+7zmW!zU(DO2ysp!o-!FkD5P^K^E?Z?X zI5};vAD{iaGU))iPRIq*fKCsu=w_J{udWi8ze(~-gA9D~^O1iWTHve_9u~wCXugvK zxEa!be%;O z#94F9x-P!lO|Wr9VF1{-(e2phXh~@03Cb(S{#xKN8uVSDd8gZW#tgHdiM~Mi%AHSD z7i$??Z9Txf1+eLUsq9+<2QHZ8-#^>-Mb5b!?%-rzU!KEhNu4?Y240uoU-=)dW%agd zffWCsD470htAhW0eTU+4d4TduX<7ItWZeL}B!CHZU<;r1zSuqt1jwa=^*>MM=1wm| zh*Q5kz=L6bjzd3YQ8f)WYtj37)6b{E5p@qYF938G`x8F~$=b4pP9lo>l_bQ~5Ys=O zg=?$Y7KtSRR(Q{-0>AsO*(~v!gbzCzq5eU;uTjrBHp><={{41cu@*lt1Gv>Q?XTvG z3V#KB^t(UR*&S$q+;c~Ws;lmt01&k|S|&s2c|g#;(|@hgx-~3Kx}vu6aMnm>s%^fh zTIK^q48YTj9hLAYkn?dwU`F=PyVt-v_M_nAwWUN98P4aSk;uEA|EY&%#+64#cZW%e z1jCbn3Xp#MW&vATGX47h7VGUyp|6+r0s3yKkkb4a2vb&#bZf-|i~jlyfB&DPO6?%F zHZ~lti&=Phq2|}*w<~`i1Dt6<$)?cS5m&rRjZ1j7Ah8?LjIQlm@-DNpvH}P{*smX~ zRb$1%Ldl`Q!-C^CKU3jebmq#Q*dGs_4A3;I1rj)l>VKYrznoX;8DapcRIuOu zI{k31*b#hUji=N&9_^e#vSs&qL+u#Od&5@xmJ12Cp2M3^y^Hx*wxZvpFoq-BCi!LT zUTW#t=E(i&U_ZQUhk|) z9KyiqxqrBvl#O|-TBOb&^lbB6<^iI%*BVWi8Las2yd8WUVknxUYNIeCclz>Bo@^y! zmK*R=iIzus5y^+Rxm<8bK$|cje+AK3{)#6av^j|4OmDED3w7LH>Ef#rm^u{fw_QKI zCsHc{M_8xnaSke@r$FN88tEQuSq%sfmXO9gUOj!Cp87m#5*yGq^5YYydTj>#5_R=6 z?fiFElfDwwMMR1ysSInD*?nWPy6#UEfWU7P;f#!0RE7C=*&v(cs%@tFcc*?>-ljBE zt&=wMJ;}HI9;fRodpp@Zz0K1eL`Nu11c~ojZmnF5^Pr;{hug{BkQ7`qhH`y}#+V6) z3}2pzxFF?3?H4Xqt={$w?3(8{W+mz6E7x*H6o1qVRUCjIo4iWJEskUKiN8q|Naw>R z-c?Ly5T=DPcBz;_(?igah|+vic__rvGLmpTrP&LEI>&-kiFIXv&O@$YOQhZN+IJ}g zAO5u+I5^4+cw8m13^LBZ@2E@MOk%Q`8ZGt0ka4SjmL=tJT*!F3Xk=X=w;J%#26WA0 zu>0M%iy-Leg39gyv|njE5HfnS?|#Aka7>#4v5%W8ayluuN-6+XyKe7-{Yf?_ra0e& zd<`7ZbJMGe3+WeZI_;8rU6N8`WwY5=GsdbSEt=S&u>W#4V170oITwyPeSdl>{fOlV z5rlcrbB$9^%e-BKLMFqoTO_(;%VlVR!O)e&BfKz{PvDTgRI0;WQ>bC$IZk zvBD9Z5-gasI?#c>>PPz2HqLLLM+~%ez&v*&J9`6sd`sydzR`XRIQmpSU3MPI>g4-n z&4?xj1VveYs{e-r0`HXpAX#0x7_h4YR;ebdTDD4l62`zQmGEMq9kgFdb$JM3x|{Nf z%`=6~JPoWa<8Qg}QHw7<@tDTm2jE zBzLN7`E4BZ_`olZ7|s$x-bd!dvn-`aI-2`{oaUiVsfu8qHWc&kMN*_*jc;A} z04@a3<;CxvWPF_t>PGP2G%n2yBi8L(Qsqly)l6t%`ER*#+N10kJa&?RYQ2oBqJTIH zXNJI4rguy|(2jM*L`|_VQ){Q&F|V2uP+U@<*=`~a9HG*|Pg(rbnTKLGJnQQ2D>@ur 
zc!r2RyQ?=XZ_vEuh%O-xiKqEcaqd!;2R%wD6@A_uY4=r1Z_J^wevazlF^Ufi79w|1 zA!kY5DXrsyBLt8B+^=;yD3=AnoD4?PgMCjH4j~LceFT-UkOqajCV;>GQl4io^=W6R zJ5QmxS>nL(pZx%)@Ne`8d3W(g(op>19E#gA0f1a(vS72vSQdk~3Om<#4&RxdDz@%c zNaxo|AFeU-L3Q7pa0;hiROX&bOpavFOE$zFVRo1tf~n6{3Eq$>1QGTL^Ds-l@NRW zw6XdPX--STZbpYgcb7 z@;xH0&hV2_vCnb+Q9(~X_jGRcxy1%=Gox8DggH{Ry)4vCU4`4J6g}2uGAMr-iH?$6 zv`(qd{f3^RUre3Y0})OLg83Gqbv}!`gwuBYzD>MaN#2TGqyeGe4qXu?XRozys;D^-1hQvxhiWQfn5|4 z#S!mQi4;{RvZkS#VY{*dl6Bj9*Z+CZz=Zz32qMQnZ=4Xl%hv3uFOv(YhqA?)M>$X{ zM<1@Qgo-m%Y(SWW)FrzP4o4HkR6~M!^R8-sh}k>Zg^uNe7d|~7gvf~|&ZOK(>45!) zTzV=CjU%jK1y1VQZ%EBL)#8fG2%WzdiAr{em<$to^7oh22TOGz0CMqZUv-jOSE_}q z?Bi62o#JI8Dw5tq5cS`g7^5CT`D*N~DzQ$r1)zGe9G(lQZCL_hYdh-ZV1ML^HC7Wgd)T^Q{PTk+6_y+MpY; z8yw0gX!(9z8b<_q(@v(yqeM68fnT@pkSG{N#X3Djbr;4ghH$+7qiV4!F5{D0;&&GH zJTW^t5Of!#8LK$EobUR1Re;20JfY?UVtt-IEe)=Gx2a{ks1iDa_X$JQZnJYcTnau@{+}O?ouFf@7p_M)r+}rA2kQ&i2)QRX zi;)aZr{h!={Irt$PtGGXYbkPC3nqsA`;4%YDu#hPp~C9?=fviW%3$?F@Nf}JA|*+( zhinhQcX^S+$8`Yl!}yQN(yF+Yd;A-^kF2O#@EnOntT+Xmyg$TDfCZzxoNnDp&hT|DKku;v9ZqGWK^fx_Wp!7J+2?ksY#Q~qibn>-T~oqxdH#_~fJB6nHy=yw zYw!&a+(No9+C))>fJgjw*)%fv{+gf2b%P&Z7&0Ms*Y|d2>%>~P;>74T3Ov2O9oIZLQ>MfKnp*j$`3?W?l%Qu2(y7>Eq z2GJiUZg_@MaL9rNOB8s>WRe}_k-}JY> z4@2!dh~WPhYJR4EFb@3pcs^rGalwKcjJV9i^o?fge8MiyWpfe)(&1*fgVLG+J zT$0~Ro7`r&^*XO&aQTk4*U|UyJP_X}QEOgwPC5Vn#RKDt517oi@M-?8wxB2=9QIoP z&^Uqhgm2%DCW1`~;i^Yh7pjyO0&zfO{QoM-^lj+1zs?h_TrbkqP6>CDi9*YC&ysG9 zh>`P>1$>qm>mG^!Evr;0D+UT%u?H{k!V)^-SyVnluSar^!W3giIk-IMoNrJnD#dT9 zq_DY5K?mXqLczOQ(5D9DZeDk6uXt+sXZnx=swOy*gf~;{IIE5->IHAEBZfw!8C$A1 zZ&~{|uW`JjpsEhK`y(Jj0<;rwbg4jwoe5~V&gdfIQqXj$76+LUUiw1J3($y^UneM0 zfS@lYmPczKoS=<3QMp_3e=|VXOO+(FN;G6yO-vS#-_fg&_^LNJzS{beEpVB2CW!u1 zb;FvX;v<{NF_2hL1sgquqp~WvBm7g)Zw07T4vt@PCNar z5U(IE%i|Mck{%;%(Y<)JUors-=E8+m9XYMf=~SR%N}{rB;_~$+DM;kBq73^@67Moa z$rMu-x7E9{Z>8ATS%0v02g!ly<*W;$DvSBPey;SsoV;Kcy?l;Iva@+|i_g9U>e!$Y z#+523{<*$4>ux*?ta?;wa2q znzaYb1I6P{Kc78IriRIb3ahtM1-ul32gZxFu(09Zum1^KcmyrXP_&b=HQS%$1sL3; 
zQY7moTUhiyUtG7mA{oS7$iJ2v)x&@MNb95vO`lSDZWOnyBphITbzs&*_;aA3Rnzd5 z-r9I)F~f4cslXjVLFQnAD+;pqFM0JL?`z`NO*VOL_H~s{70(`C=*vcM1XG`ozZ4(( z)HQB8PTyY&er8^&2q~FgM<(yZN!SvspdnIh--vP6o&(4Zg}&O>B!|hgidu}bF#-RE06$hr`2xjvV zIj6VQ%*UQ@3m;$b8JdG*VcLWOUQB`&=HPBNjS|%$;yJG~6MLh6AixggG?>!Xf-F@? z|Bym)g$hF*q59e$&b)LyTc>Ebc7KLCwZX*rvVQyhS*pTF6SUxK#I@`*AnaJ9+<`mV zxNGUN%!-UyZ2yC*fT44_-sgB9ogD0nsf3#5!A8+Ed1z;X5C2p_CO6a>*Tu8Lz)0J!TYa9&P6kqyZESYJagaTdKlNoIzOeJJA}j+ ze4#!V(5#^Hww(N#aiRL0yqx=%RyB8V(dV(@3Fg^+M|+=C=tB>Q{G*~@Cc(ui?YN$p zhA&wAqF0;y?MtD))L#e?ySS^gfD`=^Mrx*tf9$0A@@CH^w_vw%Z0E*1Zkl%ebGPdh zHkidQiwCAyucB%ap|Zf_UDUH=Dfm6Fc~kXV_Ex690nZm`Mfyh({UqzrccQ6PK-S{g)rG2`e|s+lnE{H=Ify*B9a*Q@=-jq7tq)&a2#!? z4G@BGV1u|d0vq#X8_emw_KrE$nHsW>|1;kVz{`0kGoeb}c`&4)OD4w|l$sNv!u7(+ zqT3T)7Nhrc%=aW@JmK>|;_|C~K8C>Y7#C@=pw3gXDEuL)rGmdJR6l(H2#~0neARUY zh*DNigpTG$dm|w;i^GlPlf0^uve8u%$$f5t8U2##7&+Js$(#FR9Ss^ihN7S^_Z_uB2gNy0q}o<=T5A(I;u7Qp_|x+O+e!v#oRLq;!08(TUF^O zdpc^=_g#sWp9P-C_bcbnsQyuMPn&oituAS(byCrHjWr|ZlAj0-{hqwB^Lq73jy>F5 zWrVQgj_)6aWM`H_4bY@Mm)G@b-)M@=PhTkdb#ZUQRhUrV}^5?@IsAi;`6P#2H7rb z_5=&5mstA_!0sgyiS8v1%g|j-t~e}oaVFM9p#L@w$|U3Ec^VGT^Uy>Ty;xZ-66Y8k zeZg5k^=h^*`>byP7)<>5U^<=?kl;J|9)B;3oBW&`Y*<>Ll`sCDMSV5eoU~0}Vwma^ z1<%OXc4LC|7%X1n7m46hD zh3}`UtL}Q!sqE02SDncL?bK{*>4!r=dz1%hW>20{iT3Z>837r(^oah{=h@k;=O?F` z0KvNsG~;q@;|!89PPUq<&OHlX3?$|rIorfpV}J6f^6m_HwY$JhtsvNJ#Jjyb<5Q{% zJ^PdbIEN^H;_{%zLTfm$#4S%unjKusb#r#qd=;@P=TlRfpgus@zWyhdWId?GK*^yb ztB}p>^C6Svv33|E>UZx;Wf_2SEXz+Z1?jNdRr@+B4UkMN!@I+X11yKnP!9 zn9fHKaGA&EwcP$$RDbm05#da`Xe`HX%SVH`ER*ZIVsD`g5f2lbimPctG3FmSB@%aw^h#E0xYQ5{P1EHL zIoDMjpvk_lYJWA5Dqt%=V4EdwBK>N)1NDHH^u`xblqYaY{@rh8wWhK$i{ot`KCI&t zh67n`^8)w-KIrkg*UU&@?~J|0amrHx#6RBb5=eyu0m?KLn^RV|547m@lb!j=YUfG- z&$qS}+Gc!nSp57>FxjEdERaNg`#d}`qBj;O7R_{>h1hax)EBvJqZaX~*Z>2kEc$aP zK-<84ShyiB(EAadbXK}k12R^nVg%JMW@~cXs=5`f8&*|*Z4bY%t3-ZgfwI;r)6cM) z8K28_Z1TDDqN6Og;Ac`nnE^EqFN@a;(k{+qnN*uTm^kyZ41$SKp8BjmIUrvO>Hfs_ zCmoS0?Fb`a5O-?;TmE~TlbkMmBcTd`7g5AQTBuYg$x&&bpg*H^@~Hqg?o 
zWp>3Uz`)6=?&ujMy%#zhqRbYQ<_V-|v@VZQi8z|Yg7M+#n>cIuYWorTN_9C~oEXO@ zVw%;OUzhi43;=>((ml2E7<~quep<>s7vEdWCVkAfdEqACxLMl{vJIWY3c_Aa8CQ}W z8uEwUU7t)Yl~aR#%}2b{$cB>i926c$=#$dRCRoJ(RD#6cuVKN|*|qxrz|$=4UnPZc zcbhR5Q&M$h6WfE6n>HL@tCHukqS^z2=dr`Gk(ZIfeYQ-G*l*gRWW+~}`i&IPDEKs} zxzFlK6UCV%5VES)b~y%l5T`=&@Hk>}EA(*=%D2@Y?Pv;IS;s_}EQ)3H81{-%?{}@R z1E^l#?_BT!6yT-#)gb*;U-bcxsE1l)8IIXlHa1zr9wsnG88ERzEs}fWvnx#(8uwo#rC+G5ihqLg zq?bsqbvWj|WXxW)1^2TYS^sXsuj+N1ey9(@r@lW>Z!2wyDTEg3J>@S|isan0B^AQj!NG6FEzS{&awq?GnL21y60?n=+})Tp*YjPy`1s2Yf$!R{U7X- zv$+^}%YOFCt&`9Rqs42uSkWo*o&Zw>^+%oFX>E(a`Dt*k2$Z#NJ34FP>KAFUVMrHo zEukjM*E4p;;@RUpg|X;a465+Hke_Nc3ON!(pwt#40`KRW(`I{AwOJJ1IzkJW=6LwH z{)8YGfP33AyUH?#Ew~;=VAC+{VqOThbLC}s_$jeQ-HegR)SA+I6_6thDQY+ctw@5J zE6=F;Q#)w^ep&rCiSB{dyUH4{%z*A$d7p_QH$0si-fm3=r1SHPTKzu&$y`prMKH*H zBufN10SUeID^<&e$MuRRHlY`hw?ltX#g+2s&qNkmmM?45;u7E8F!Bb!fYYn? z1#YN4lpd~yvb=l;n|x?8Au#(nEicyFMDFWHS?lUlaYqyo;8b3Q}kg&*h& z{e{MbG$f0cT9o}81rr8o(6j{D=V$pw_5h^3)eDpUVH>S8>f=*m?aHvg0CW>;Nvkos zsQ%%Rb&(nYa349GaN7-_4<{9>t>>Q3%$x##;@rtWBB}f%L?*%aB;-D@BtUAwug+>g z3@Du&vR&m=e<4ymTS0VTac0X@^FslMsBO4 zkSFhkA1T5Fyz+}HX{}e=5)=rO+qqJlTES9& zMU{Ucd9X1AYAc_^CaSnw`K$u{O`3OLnDr5nvRK1ddLDwXmv9=UCHv=X!rTfOC2+4eUPZJx)s?N92zG`Z))_GZSafAsTBE57+&}nb~p(x zADOthy{s%LhRD%y^q_03F)+;2M2zc*eLm6r;y6t!B*=3{Z!i^^fn)?1lw=7?_MLCa zFteYSG=4WQDQ)u6svksl@o&BzwjO(#x((#|u7yvD%H3myEuSd2-t~q)O4~ytJxghY z0d~&MDovqqyQ$&azMGDSarV?OK*+^USH0X{&sMfG^A>R4dJF*948VLGANU%OZ_{0LImz9ld~Nl^GyhhW}bpS@Q^hu z;WN&ygwva^JCd#HnMhGjMoB1`NG-JSf?ZZEJOkseXiaF zM|_m7X5UH})A||viV8t4*^#4nvAHd%>FTCH|}RI zRt<*&B%`C3l!}6fKjUt;hyDvmfR%WM1^HLS?U`={AKcfz?fvSekBUjd{Whh(X-&wM z(iaZaCt?a&hPoNCN`Ee&d|44j?x#ry~1MX9>PU5}+J4nbrX0f8TIZL=PcQ7WpuE0;9 z{1FjKxhACNhJ)zuM5Oo+iK(R5C{;P{&nhN(adx@3TBNGa>BijS`0EFzryXhPODa-( zj+wJ0Gp@}=u7QaS<(1!M`50ZN-~J3v0XUoR+b`}40Osgl3+G$Zz+0-)e{V{C3`VFI zkIO_>Dbj(f6!eYp)+V$}?zZ1L=!oyQEaB6I=9nrmt;VMTHC!l_Vp#ld#$fdO*gxu+ zu96is(W3;WR14pama&#H(5k7|j6Vn~Ixs2sr7KibaM-Q08JfnQ2;MgxQ|cT`+bByc 
zY;*0|=!jjdUeqGn#m)9N%PV3tdoLF)$TCZ!x}Dj{nAS<~t7x=M#~@$_4p;4|u>TR# zKp1Y~xJb~#d@4sFZ*_Y-$SUQlg0N~9|GHEO*U`JhN{cvFE-5#jmybWcN6ENj$do-X z0G};BUC33j!{*-9aE4`q_$`18^#A}G{|mB!`HL=VZt z1jZ7jqMi$Zsd;#$?td)Re8k$4y!tqr^R;SFr)+_G0%RoE!py^6`N{4`E03l1@Hf5* z3f9JTBQZ=b1gSR1D|p6ajyRB2rd-q4f1|ek@liY}4BS?Dtg!)Jb-Hl56jk?@j~gfm zJya)2%tcqbK}^vl7SFTAd{)r1w_DJ3zSbBFRJTopniWaC?$l%cMTE5~%ta&#dBEp^ zC%gSd#y_64-Rs zPPXEvb;0N7=4*X)W=@Yj=BVCfN4G&nII~$;6cdg<2DL!Fix|=QIer?Xq_IaE5y)pM z#gi0_@N{Vbw^e%r|X*(8P^pGBY*cV7hqw1gpH9BKNqO6i2_6p^%*kxW|NS>46zsr z6(1Jx6QnY=Miaz$F2Ab_BXlFLnu9Wa>s1e;qf;3^&`%_}m26j{3ig&CW-($eewg3> zUUkFZ8Qsm{=he+wm%Uf9{2u|7<`e+BX8jGi$tYQE0+Fs)I zY~k08fqAAYMWNa~C0hE*tVhZBo+ZcdL4ktf2doy;&jK5m=X#HF50xPugJ7f*!`5?_ zo9c2Z%1mvSj_w(JkCNCzwdPpM7c;=8Sr+fTyKRH+uNEb=Sl@FTzeqM6idc&ctQIG# z-^c>-gxXK*_Ll@YUbxHdpf?(hGWeOQ%p%7p5}?euMh*;m;eM$+n@z zpOLM4$acL=ntx96Qe#eXDKYt#hB_d3p-J-Dc}H%x&s3{Z!`iKLax36B)4Z_*cj5gB z3p7_&iY(p1;SH)bb#8g8c4MDeIdWmkLR09KHHrxnI|8k?6yiJ5wR-d-{L$+0@~Jzj zdCl?Hp2>)Ny`_b{z_ZTLXO40gK_I8kJDD0965dcDPKawrCxW|AVbE1x{v{ z_A9)m3alnyGXqt1W%_$;dZN=A?YRcD0#>t(H2iysX8F(Plr!$)r0?a6B|RBet*d1D zZX{d147+D4HMtWY6)9J3^?=2)ZbET>@GFOmDvX|DT6uv>&zciCwN{fyj0lZf;2>lD zjdsijOi`JIhMso`VPT<25y@+?T#y>8l$|-y`X`9*8=qpnxMI`o7eQs|_pVpIaFag{ zUs@3;EK3VDDD~);4w+sQKL3*{cdjyMOUid>jara^?S`_A=8FaSH1}z%Hz|0WuKvj! 
zIDswNp9@CNmKJe>$=J!Yc=HGjwK86~w47>;CKaMx5!sW$F-!>GF-YNGgBx%+`chL> zc%!^SkEP|0z3-IaQZN!oQ&NfE{2om658rvAa}g9yQv(5Nr5YZ3IJ6d$MS16@?M*)m z!-;pnR^J0fSYkW1o4UzRpyD%ntL|dSL~Ra^F?fTHI5f_Cytn+kCxxVlWu6Y8$LDHk zzdal#LX!P0E7N%dFSK)X8)emf!6~?<_~yvpLTz5`on)?5Pt&-g64`qBeM z-$6K6KzjoZnVScv{vo*_=Iea!)_u`i04SL+S4 z=;Sxet zX1!nKHPTPCI29s9nIo&z}5tl{qM@>aIgJ@Y5y4v!Z}pT zLivAyG;E1G^q8piFhQE7^V;x<^2a8qF}yRBO@D@{pENLY#3Y2U2t{tozVtUCyZL5L z81Cv!eP64}!?evcUCgyMYR-yAF@!|2iC*PN|;J5#WKOzATf5A_bE{4@R6>HzGEO@;kG_bhW6I;3M~f9blQ(k>?oD37WDeVHvw8b0kb zZZ<#tnGO3>hZD~m@3)X{xy#ySrMw+SB4wTSh0wEEgfhkfG`Gr9Xhvj@yZiJ=G=5W=UkUpwS1MlT;2%?+D#O*W|zNN8M zN#dN_3Tj%mbt9jvqM{nP_}nr_=rP{^z%tfr=H^Ka9{MlZ*X#QXm^|}uZ``tZs^O<6SXL*)~ z0gvmC^R7csB-Yc!ylb(uyU%tLM}Oykj+kHfGn%eEa?|0~Hd~}qCD5ecc`TD->E+@? zTxL8G6}AXHtZd8}=!}TZLoT|t=6ymm(Np5EeI)pBr4vetDD0ot1@Zqh84E{FHVUTWy4SQ4n)H~-|@ z7MQ@H=x*0{+>)O^H0+Zin+~|M_p;$Y|DQK-$k?aqqgj*tEE2OtqZb=D)EN5n+Q8h5 zFBoc9E;4!LF)15kn9IRo=bnRVej;h=LW1f`*Odk?Q0!yK`=4S%ioS7!@2n;W4h*RV zgRQhQaS@~ECQO6r;E8;MNrhz@H1u`XKIYi__oc)}IR=uHgmmbP1v5x}%^840Xet;WLwYV~_F59p|ru#uqbN=W^pAx*roGKzm#5lA|kAip2F7pFL*Dti6d?Fb( zAvnD3`;K+^&hL^FUr@G+0b0rUKmRY5av9j~YT{{h#f9EoVteP+W1<(Et&9Hg)M*7z zWyh>yjOh91=PIjmB556uspUjHn^?6NM}|gAxLdhX`@u@K6#hHPgUybLVnC@=W1)$N zME+r!qvI;x=VqQcL5l!&(_=(Wch|-2SpH!5aEr|y^WBtEU$eHWxugc}qpY^V zh;^qk&q4dXWW=b5>$0VSf@p(TI@8xLg+RhPV|c73)}lkm{1mn|9t&ao!?^zHRE!p9bS5S6sz|f* z4Nc?aBZ~amBwkMMh{)?Dok;HXDd4+$ZEKdb2lENhk;3Q`z#b5hmJ9ElxG7Y1fuc9EnVj7~%X4 zgom@Zp2QBn`)Pa~yr-1klJ>i=)aCZF6S0W1XpRp?vlez{Cf;Ik`6m%%R@`J4dCcES z9~&`Z%3sanxsY>N1hy(Qcb=K{Or0ZhNo!FbsXN#`z8t^E#j_pf&o~tKbqw`d@(Et zIzA$*BnA)xxnqg|SOO+iR^k^E_u15dVk+tXd5%eq;$Y`$p^15+Lj7St#q@DokV5i8 zuHGS23TLwESevecH)x?s^5L$|aSJPa&v`vw2IL*jrpK?@2wy6>V0MluJwf$Lys)XC zb-P%}pL0VTHT__9+bQo_ZcFrpUeT_->)r)QRgyDC>l9KWVh zVne3&yhX|AzF8WMigWdH+S0C;?X>)T)7}}5ntYT4a&tv1m1Q=5?DqC+@Ab!mTe+R~ zkMmKHb2~$*yy^kBbuQ{t7)y(4lkFMifVDJ(OT9qnIC`S5OZ52k3F1{oqlH0qzIEdA zp#juP!&V_7A3!)(uy)4>g=qi;SZ$3DSGilaCWmP~1qf~e?6S9m_}>SGw;5aQ{!1dq 
zWwXxDUAJ&PckaL1q$AuVNouvrA~Icv1_eB1bMn@6mOvw)P=;wc{w5!m0faMFpK7oA zbZU)H_pz$h>#x)QKJArdOP^cr+?{_s-(Zq|nntg`=IFygG zGN70wqxS|s#t1g1#PEYppXu(qwEbr;$BuH}hb^j#Hk2vOr3OXpr(q+sf{e=v;jo}! zt2g(mPMDLqDqS|R+HK5UU9y(@;%A~a)lVFs9zTSE^!Zz1e zCkf*BJT#YF)x1|c>W(Er@#~L%X=_mX4TzVoup)d5Y5v}(U-+~9*3%z9HiyirM@Nc? zl>zc+5#j#>6Ub&9qvzTHg*dm*N?pQbqQUodj)nBNZR?{vK4@0$rATq?&X904t%+oj z{^`kw2o(Autu&gwey)|GIz93dt9SS(e0@uc1U=={67XD%7I)tX)a;~TqlT)hS-1L? zWUQD^8BpA;J8|r(AG7mDzXWKWdZaQkoFz{xNSb=n$?10Ccm^Ms>rf_anvsp~H)-y< zmxu36tzGF(YR^fuclYVOcG63D-eNtBe*-+|q(@+uJlbhzORf4xLKz$yVvauDBLo3D&D_%$(jUHJDwF6+oXSGcQ81q9~i*3Ij5R@QvB z``%IHcC4s^q12*NDkuWwTY+7)0K?4$xsHRzJ~iR`+vC}uvwDh?d)=ksxrG2pow?>KC}(U7LCPc&dAy? zHLhNQv*3gDkyT6nULK9RYldSTaKUS`j;3uGX|wBbp~E=!^BzEtQp;<%&6^_=mY9Tf z9P68f%bQWN=yP<&ja43O$wJS~CBWVuw8-s;eAmzdxUfBm7<*Fi(#D(89&Z8VF0&J) zE9aZ_<5CVh%k%`(bi1ub(K9RlJ`HT3{9^tn%5xdkx{mH7lCS6?&3OrJ-SJ=(69BS* zSNhX-B`H{Uyz{l*J$Im+bmA%GuzIL^#&^XgI{vh(rfKEKwQss%m*}GYbcgcuzU}KV zREc@vY)`)$9ACD*R^D(5RVaMD^9GZ+i+s8{ajnE}GtF0rf)XHhF&Eq5Kw0ON-k&^K zQet6M%EktrPIuM^|E_mu&>Ss#pwzf0_LIg>w?h3@>$!Rje%nVz)Bw+ni-{xi{cCf# z|9qK2rxzCdPdvK6+|a1nTT^RRly}>H(BWCbXb&Pd*4CRF9`G2{d2JiR^1k`3OuSvG z#pdOjZp0~|CiUrZs;X3Tt#fw>`KHVs35IdLI3JTki%e|OKl9VULi+J)TN@8kdTy9k z7svP17anGU<)uhG*Bp5VL5VO=%AXp|q|KQHzCv9je1oE9_O9&4mM zble)^^Agbh*kD~>k1^JD=Bxwc0eWBy9rJyR6r1T948!JEBK(*NIOfV2a(zr9%I16Z zO@SQo0pxJyAfpFgGzlTk`U5afwa*6iENL~5kt+u4XF6Bhe<25A`ctGx&HIC>bfNGV z1~9Q%FwI0Hjqfu6hy&gM?AIN^X1*o%|#Qs5-sWzBLB-+NA)_X;)z1@kIR1 zA;HI~A&Eb!4mNz=78dpzuSrfNcd}TgpX(r1AIAr8d3}_rAdKelN;)p~vM!-Ww4Iq$ zj?f)2FQXzdxaRuEb{ zR!dPMqKHjuB&ird5E112*86#W_wzZ9-}wi}VI;ZE>w2HpIbW~7RHyAO6e&@K_qtSj z45)7q5&9s?F{kvA#m2h_w}Mh+ag&lR*O3R!VV*<#_oCOxPjdi8-jWIn-KKT;4DI(b zrvna)^Tzu#Io6FzJ#OVGyD1XS1hn!`Q;R1mh5+~aD?kwH?|b&O?~(S17#_9&AuQ`Z z<0>R8pq)RZaI({{UdJ8fIvZ<^+*36EV=i%r{xeQm-}3lRpoO-0yJ&i^9sc7G*C$Bm zFPQ~m7X&-*hSHev+}jjvbZobNJ}?cJ(OUw~tNrSR@Pbam9D$PbhWsJScNj&@v9RG_ zrzdeWXX#$}*{^+h?#DwUOt)MSQ;sp37l-bY_??=p*F*kXn$js7pzb*{kn9q?25zk9 
z6+q!tAL1Naj=P;*Mb!Mv?1{*(&5ZO2zHML&HuA6BJ!^a>&Gd7#g^1tpVYs9Ue3-0V zI$<`lR+l+~Y^-y*@&H_Ly(qOo5keRQ1(mqgqxDFi&X& zj@ya>48(EtQi`Ui7~Fg2D>Iwn<7D3HAl&C|n?*$-DL)EiFZ19_F(SCjni>fi2GHF6 z=1Fa@1rWr=a52r2-xNI(6~b{_C>iLPa{2WDLDhKn^aCCQvJq#yknz_eM=XDN1&tiV zi=7*pzIBY79n|sN^N>Hj=W|8KUFFXXxT`kMiaKYa1!Cw4>NsYU>`>9l)6qw(;Uajw`pw(VeA&>=$gB|+u0*7`%~dX74y zs}01kCNutV6~I@m?t&3IpVEG=Pj1pKQ&2<2-(6wzl-;yYW?o2t=nszt*HQU z6ZIY=snvB&Eqh7KwIpt=U&PZzQ- z(grmUT&669*`@&_e_?^%^@nwfdL~W%Qp#dNot7Y<#HZe7594CLwXdfh^JKN&b+EP@ z0Ci2)yHp7RWPWWWewz1FWyBE=QNPmawgHNB$V^yXRzQ!yQ~bP$rQl5frKQ*^6+Ef7 z=NLtcdt>;u6Qb2wZU81%Juqgz?O$^%8#fkJ=sY%((^%Nx8ggJABp*02>LI&o#8m+;p~3)P}#-6w$BxM6a3Y#?lR9x+TU~ibQnDy zTsZ*3ssSt3Uu4u4$syC||CK;v-BY zIdfFu*Bm!Gq5}HyWMl(tYFueFHRIzMS&4I<(Y|;&nc;w?aZUXR@8qqi_|kVot!Otj zlkUCCyl+ZXbE1ZU0GW|Vcw{O3pmYMwk+K>?^_IRZ8T`O(7$(yGVg42)`1Vlj`ooH_ zt-o>@kvRbXqus{j^L6bw@wCem;uN*Tv z<#VZ7n0(6w+jhUuEO-q7nqtLr`ERu^lDdEhYX>NWbuGWPzsUU!!Cb7xt!;S*i6@?& zHfT{eZ(8corvgo#qkOkTY)nr_-+f(SKWe^!R#lhpeHInlz9$A2-;YYMrIarId=T7kYJnt)Q(@{fXGYZ??^=YT8^vXnbL0ziazB^71WPI&P13bn9z_U zhl6bNOZ6?Cry-}Fl=e8XXp5>0bb1DiwY}3RNy? 
zKv{5qaUC;cdFY|%SpsuE_xr{cS7F7IArFL34%=CqP<(0u7j^oT=oAmeZ^ClsQ3Qt= zaV@pwqUogZel-bJ#Wzvq8YWacM0L{`X^zgh@}f@tsb%`LjZqrSYfQbHs5!btY)wQS zCdt_~MQ-e&8FUccUz<)@lVw>x$RukeuVyHDO#Js^wd1v>MN~vlfngr+DVU-1>YK{y zGE0dO>0^z+NNWcSf#t@@#`oK)WIs!6Yjk5H`*74Q)aDP|yoU&q0+iW4#1bzNMAd_C??@|>v z^QMGf`F-WT7vLTHZT&cG;ysfq{O5GcA2H;;|L{bG971 z8mn2jGv-%zleL$I=4ZqS=ctTwQJSodL$40c`h}r`T-+NbeGWK1!8I zF}7jYQ5bw?imDP~*=rKP=&-Q6><@JKGtsMK)fKLg-;EY=b;c1rHacz39mW1Km$90Y zA1aZ+?_k2(k0e45d@o-uzsR@R@SaeRl!z}Z_c&ipjg zzw#wr=?zSo^nN$QJL;h!tuoLQhuUl*6?_0cMKK2qzTM>gGGR{KOGD8du{{O2H`4(C zWfKS4Yj5=of7jEi*#J=HTIhV|XYp*Wbk=H>q4Kz~Shvy9K%zCkUEWyH>pnOd1ON&@ zTrB~Nw1GE<7M+6x0(a+kwnqkTiIn!gp|-HikRUi|l@^^_n)`|;DH5A8!OWwDeI{Zm6twu9}OEBA!1Bj%GFgb z(94nFh!Ilyqs(^+o<;1;2RB>-smb`Tul{BO)0&>RM1!E31&7}f=TLC3y`exy09&Xg zEYzlkq8!|L$1s^!ot}=D4X_rmG-9h$j99rE9W$Q7Q9AMTooduT$b&7>ri`G*j8Cj^ zl!&T2Lt)D(Sty55hy>9ibDgb>8i!yb)%y%*vh6SFk{$?-ejz4iaJpV|{8H>wuUMV) zvd532t{h!|sYrT!z6pBto-+UU*vMXifs-w7pIzcBfE}rOemr06(gO42ue zAAf!ADC>+tzP`4BhRC8P_tQA}`=D649A^`>|6+|(c@1F~h9;>vqfze=kFm%faAHXv zb`pJnT582?Y;}hffkvP9D2&S0x#k=IX+JxB@aB74u%@x`fb6?Yp&3Dg?Ec#CQzbe<`%Xw>TsCIt@9$>V;zjmR1 zVJJ+#iQEbTMX;~LwX1%kL{SNDk;FnW=6cu?re$DRUY$*s#xFIjjv>(CXzzA~_@!jZ zF)3c4YFTB5$H+*Q@@BkrK;_$=oY6CGSWYB{d5$x)z2bzjIWTq>#lS@h14picERIzrxi7wRLG1Fa-ga}Jty^joidSn!FU8h#qNQ$bBTn@L`Z`KN ztCNzWU;Nk$giEc}LU72LcJY7bv}e~epa7m)W!s`_q};Rm{WY;B87vT|F{VXczLzt3 zkoym=i(Qk}50@?>aJL$6yOZR5a{ISLghg?*67D&!DwJBX2s`N-i|~9?{_&M()g4_@ zEobhn^(1+O>+9LzH?y~eIG26MFwGP$LmP|&Gtm0GKX~`|=9e4Q#&y@q+aI8wr%Fns z;+fu)<+PN9RnEIi_MSj}NawQbHq0$#I07*g83!`^1BXd zSaKDycEaeH=~~Nr)97oVl(me5aq*(M&Yj3np}*)Wje#Lyv{a-5E^M=pve3j>w>NKg zW%0FL)D^I0DJQ`i1mSK6YlA|^m*ct{=0fmv%~i>o1<#jmRX--AFKW&i|2${4Nh?*+ zvAwwzpw7UrJB1#oDali{LsFs>kf9nq2sl2yfAeHb=aAGJx+2Dx-z=D8NE5jKatmKY zep8%uw~Sz~n+#AJciNFxP`DCff1U4#Lu;^!u+YKlfUfvk+XDYY?6+IB@F<{KaSz*4 zE1AWk6%OC+BjV?DiMAfnCYRn3_@&tA)BG|s5J6^E{%-T|AG!x?%o`KngS?VjbJo1c zN9d~3d}rRasILn^p4SE|nyKn)a^e}n%1T1Tx!qa(lZmOj8_##WN6#!F#K6@Q{O=#_ 
zwqEUe{x0%EC&uXY?siqLCBAa|+kBkaQHP_u_F{Rn$zP6Cuen@QxjX8fXtCHy@<9w{ zegBm#Z;$*{b$LQm#CvRMhd@gB;}Y9*9)L?b<~2&Ml5F{r-e8D<|tc*u5z_QxH@T9lvX3?Gs51 zOG?jSJQhgK>vR1~Eo{rzfKGt!7w8L7mzxMPNN|l|j;jQhs@#0MBR~Q~=^mSC z%Nk{gP~~g<1zkI7n%B;$>}PC@rAHv1^J%@F4czDor3lc_YxSbbd^1sCAcLo-iCThx zl7+X;I4pR=*Y@iC_dSa~ z@`yv|WG4V&cwJoU*W&MajU$jTUj+W2a;?SX1E^@0}pU&}|BfapuC}Hdh=C->dQ1BWy;H7$n29%YR%{ zU1eyOjpp7=KzFfLP65fT3cao|iSeQDU!Yt`Mla+=w7p4cT(#%qzO_}ZCCf6j$Cs!% z(t&%r9DEswK$(iAzf~nm-jc@*HbRiwZst-`&i-KjlT=pOhWE*rTI#bDk&djd$0!%@U&~Tb|)yPb+Q(R0P0=o`><;KqIE8?Qj!Z z)brWKnRT9VuX=-b!v#z3tBJ}YGgG7a{g>etZq`LN9*z2k-F&Eap>adS<>>Qu;n?{t zRh8eq=P{WclbEo@7qMr$!Ho4tQ@)zAks#ceu!)K7iP-?cxQX^)ua%os2YpT@zJ024 z{oATrqQPX*qo=#N5;h;5f}514-hB2uY>~1H0BXP{9PFB{GA(S<$wjr}g(0ozVMX+p zRh=l&zs;ICs7gW0pGmEWr#KJ0JeioJL`46p9+oI=-VEn8ao8Q^(>9M0f32cjdHwZq z?T)60TnPI}JNuJSGNTBX=xsi&087bg6OM0foMi<*qSt9akOlVt-HE?$!anew(}w-X z+fQ~m96TYK65QwOC|`Q})w=Jn21=(xijH02iW4CZE+MJ>cQ?GFgkjGgzBr)blV1zEB3ZRX5yvphef=gUxgABui)bMg5q6 zEagBEUythY3OVuNR)1adXIC0@=5?f)ty)$)iuISm7o+CFh7*bW3CwnfC%=hwIrG@- zey8t$l^eBo4wxFB>rcLOwaTX|v`X8n=WB$Lt;63{eD;599*H3JqS%X{nw9VlCabEOTp}&gC?uu8L#4fr}f#P!> z!4tVQ{mHZ%K&5*z)NlQno34fJT+0fzdncU-ewiw~1`S?6B3nEYrHK7%bg!N>=3>!C zPm_r|tyGdDV<7#F%f7%-UE{lMPc2;s#=qtpEdHLUx_jl8yk7Eg*1QJO%-Qo7eMoC6 z^~FZfgi8O(;T_n)ji{x2YYiFM$c@xU+Y@X=J_+tjG$m5zyp)1NMx|!`^i(6hsLXe zWA@PL#NIALwY!5Vy!34iT!q23ZOf@L0yJ0H@&aEU=B_c<^@@@Em5-wBJJvpO30%Zt zY*Ecy_ddcIj%Z!Y|IyrWHqgG7f9vli3^ephTSOP^fpBP0j2_~-XGJjO^F|fi&}q$c z5aXrT)=;tOR>_&uFiB2pT?#J%FkE&ECMFSPS>NrFs0~oc228gUU7eE+F+D{ zIBsehI27Dg?M83j&7T-e;y^uo{1~&8m8z><-fkD1(2$nSYT04(LAE_vBZ?=}_XlxV zua}5Q^kuIL_D3&2>(Z%_F<60a1(Q^UM@!iK5Dk6YS7XGN)2OEfMBHl>IcE0b>VC+m zX?`Ae-}FBzT@$aoyyer+49^V6=DfzP`S|2TAg-_3hc!ex+ zRfgg=`^t%9p0?IePU+1oXy&D;Q+%C>{03C9zA(bsE|S>VCC|rJK@1iAA7#B}&1;lN z9=Ni>S6mIOzV-ZQp_X@r*6G7h`0}$X}EfiXM3YdSsUTqi}G{mDqgbLhl`aLJx z6G~r|ZhXD{W4&pxWN9E5Tj{z({K0$*YJJkILF88dm9}PRPVkF?=z^U`VM7 zb|3(_n18uC8!nFYL}) zyaD&ZX1vy^{SmqC{&nBtAq>aiT|w5;>` 
zSTYIUU}nqp!S7P-kf%nM;7MOleHr`l$N>VxP+4^a?05I?Zaf3ZH4{T;n@n1H|B5rf zlKN%8RN4zg+n!xtHNZvC=+Cgiw5OkINW;x5E2lar)hpSVOS!Kq4Wc9{@(Jy(gN8e0 z+>K{<>u=T* zQ*Pv}bF#2zK13yGKIZnIQlrFMX)fEH!5YFIf+F|{=$uiqYQIBX z$LB8Ry4~gGY&HKgBOrb(B>jNql{rn(oG$62huN(PdG)*gYCgTL01~yPm(3ffar~99 zU9bpkJ`e6Rm3ue(s0qFf1l<41Z!Tc!Nv4K&UQSeKh^LmlNP(>HQJ0S}GjrP_VZT_d zG;~$zAFo86kZ=QY>$xF5;w$aws*7$5g;t2w5#H%tmDnHjo2%!~8*3vPj%KdC&0930x&X< zZM!dE0eW%Z;ry(0Tes!#;`pz}8az~Hth8?RaWX%nR#6r^McQX{ggP*ik*5_*i zO6KxEG2#6%VblwKt2B`s$Dk8U!A?)~?}Dvvz;mkjvh>bmr_Mz;p6xQmCU zh8kwk^^kwa?_aY6DC0K#&@a*;eD|~KGCAUVHQ&U;xEag+&7D)P$Wu$9fpgH(b9>4m zP-SjS`@3n)(Ag?Q0hJgl);rkI30?DP@2Mwti0)o|LJI3J{#TN`z1(Bb#`DEVQ1~a# zC#tufMeKJVGI9aRYc=dQO}Mt@b~f1I@u)K4V&>zB?H`S0)7{JT1Caw08Pr0wYo@ks z9Wq0$+%+CLLc5fYkVDvg;>b|o)r-B;Ma7yh=x-3Yu;4(^Zu!@?C zncoVbFTxqCbgRQqQsNZq>TQ1QH>$tIpEzU$8*c524y4y!*zpq~B+ULU6}p@x{`WPK z#)EI>pI3Td$nVON|L1wIo2uo`z2A?2mMV_FD)$_3;gaDR^TKd8)5$See8Z(x)TH*H znu?EYzSUXwLG`npd34@5_0md@@5gPS_NOcR1YgC;qOG}8iZLb)N?d6hcm1p0=M{w_ zcnhB;fn8La)#EN_=g8L9)kX9d;yozo8kOsL2qU{iYjRLb*>td&!fO=T17t`yIc%}i=l3@jY}clW)HsSmBWe>x zC|){vxKx6H)ixX4X(DI_&n?4B$$=F)Lys|Gqe7~g#MY%e>9BWUHP7TGQT9QUcBRbrP+S6|CGff3mLK}H36^Z-X)AyGNNIqM)s%?ct zRL`^xM7@#Y6puy#fd;LDG@INUNnjo06adjxs z_I}chp>I`uLl5nxYMM!^8J5+%s($C&9R~fULMmw=d*>@O3B&Ov{6!Rd)$_z?-@z#C zw|u1cdF@TqQ44mP)6t4GuIhu@1idrYBMF1{{xx;V!V1(v>vH2!W9F|`-tQ`7*kl{> zKMHLmF8pZc=%yf!dakEAMcw@>UE|}~_S{?jzToz*DP#fgPd>CZ?nIMdCvT>#)H^_) z!f@lkn67I+$u0)Qw~e3ZKJ&e>@uh`eW?lCM@6GymXZ)^3R|3vGhhhFM4|<@swYgUp zcL5hS`>!c9>pAhN>o`+o^``YZ*2Kd*5(Jiu5&0fj68$T^oBaJz!ouj64a;!}MG+ID z+-s4qpl2l{yUrbRl0oKaJLgNrH&k2IT{F1H+#3Ddk~L$IJWO@GC~j)(maSc-&U|{h z?6M_W`(mApNbAznT+L4+PeqkY!i^;T%3iETRO?!Sn`M;dd^NS3q)<8-;=-%HHoUZ% zYQ%!huGWU_M!UjB?bd0pf++h=9JHQlV!N!+6RMH27=o|2Z!2_Ii7mrR(QwbA+3Se>JU zTbc*27&`FjI2ZJV2))n}dbfGwhTVd`*`tW8o{NvqhTL%Q-xk~q@!o?)v2QgcaWkn^ z!I1OV$*;C!n6GCtsFy_4mi9)xjs5Sy1PYe0$K^l}dtbd<2g*O5|-^xmwSGr^T z{yioPyw&B)*!BfM7G2;BWTMCrrPbxoHv^(kgpEemh$0Ay#6P)U$h;Hd4jmu4mf|R&+{9V~8 
zvG~!FRHTGs!KJCcfS|gb*d(basbzBObs~wjQ7#HLuL_#E+-B;yr96F41ymx?(n@Kh z`c-SsJd9J`OQ`mkfIm3>ME77R^(E(n?tvx{Sxo$7O)9S zfKpN>(fhndeQuqwZuM_yYtBC{W` zMvf3=FT;UO%4y4%=LO{tf`JTXX()k&uefEjyLsry6UzppTha&-hh?AtNJ^S5!!>5P zvfcFvlax?Ar_6VCAOI$3?N+hszCF#|X%#2Qr6BPPE5XBeRHwfiXT8yJg-eQEYi;?M zh=wI{C(74x3n8SJTsxHy0%0XvGLakU$Q~tp;Y6N-ZP|o2hcrlM^NGY(I&I@J0rp)r zWO_}*YRKrBC3H@_h*$T%b$!E^0uF6F%IFymGDlyU*Ic^|feI)dG@cq_HQW7b^}e3tI%w$f3Z)dgH?9O6Wio@t@9lzb@Tp(@3I*m*E1*LTCSYq_dw#A@ zE*ixNMrC$Ir8OO#2JF|S*QX1(q=r`;8Dd|6*Z@p+SSV&6sCoG}$@hZt;AmOXRn=i` zD$Z?IzQT6H>{IDWOYeSFkrIsT42%7^%+>FpMnlj!E2*If3+*+%QC~fZFxj817j6{& z+ylk49&A=tnT|FOn5pu94Cd|~S7+pHZSHICE`DG=WhkZ&c!nhDfV>8;yMAXDf)7)~ z{}J6Wf_`g)*j^E%Pjiwf{4UP{ZBI6=!91LnaV{5(dTFk<&2!)*FDeDE^Cn;mo}01{ zMT~0OLK37DMA_>`g)uy5r{~5e?H;o~Q6MhCIMPe>-Je&M@q@!eQEk&t)+B_uyFQfG z-!(wjASnd>HNBOa%;yhbzItTb0zcp{^@RyZfxc;tRx6q|dk-o2kTMjwPdnUwAf!6H z@&?^}-M}W|DAt#k&2e|L2stUXNC#F{3MOB4hVCp#JUuL4m?PT8+W0U#Yjgx=*G7@z zl=JG@(*Ig48IvEE`i?Ti$nII|VYQ@=O8{=1;G@jGfrTf}y9-i2 zdMO8G16K2u`lFiu8o8P#;wm-Z^=A_9*^0-cX@$*|3MlP}$|Hh9?t{M~W8LxJtj49I z-kSh7x1P`~5Av0}wWQk_$OGkrul5?4y$0bL4=;C%`H<;zBMKhLlp2Y0^O{%CwWN9c zv7+oN0;(3AvR6dI1Nt6&y{`e>+rKxais)af<2;e?!4#2-*f)rii^kV0GO#(?&NQ2( z^Y3~w#?VW?OoP0W=xtiql6^&@_LW&C(~>j1H{J35s)*)K!5pA zuYe@D($kTBo*U#kr-Fm*L`TpKbcsZv8boco?mgePfi8muP zwdE_R&No`UPR#cXIIT-PfE;vPi4^hm|7qniSn_jWUH2?|⋘o4I&#dJA;j1;?q4) z(-Qek-QJ@=;a%}wkdx~BABCvDw$t4qUKX``5c3qEo^(GX0$~G)Ktc#lY$RaFd{qhc zj~~@gUs7dT&~P8f&oLv@L5(X>0ai$aQ?1K(GLJl}TynfpXm#^)matP7ASZ^@lh zt^4PULH=W|C|rp(ve=YYj*H~4^=2WVQtvz#GLa-p!`Ouc^|_DV)uYeVBWyg~W;-tC z-O3S;&A>x7buak(4Z7y3IXP}MpAXOSSseUCSBq28m{(^|g8s)`$8&oAsiq!jUlE}O z9btNh`D4*$HrXsh9limk8xtK+Dd#uxI=qP^J^2MvLjA9jkW7x302RULH&x?XkM5^z z%otzSEqeFFuHqCEXo-DJ)w@-~XXN-tBgwg&uGV;IB*|R>RMcjmaiY4Z*(Rh~JmOQF zi-SJNS3N59$AuR$U2j>QNzfFikJE-+--OHz(Kp8Ow@X_jMPjSVdmhH6C(g~p zzIuD4jgVeqk)k%rVF_fFDgWcc*=YT6q!4UPlB;Vc54^wZuQ_LW)D~m_@M%MK*NYG0 zV8GqJFxt9{UxUk^InKud^p;P5g{1r3T^ha}(fs6^6mxt=feJ8$8&{GWhrHypT`vFB 
zMW|v%7b0lmwfq;$uxcBKU>2g@!WYhwHhzrm)cWd$Qp8>~ddd6KMQa_chZo|`B^E7- z=MJM}bbQgqV--!SuG!nX=uz<|Zb?FLRmphlf}3kw&JVIzKvKlS5!H)t&nk&!Cou3U5sXpuM4`$NbjMU?`fnf8#Z~ z3#>=d9(5;}W&`B`Ss;;7(v4-$*vu(a?`F zD}J+;)t2%<(Z$&fHCd`GMH_zNZ$?di3+xCJ>Qy4C1P98U;rf^*(iJo>ru9NX-{QX; z3YfItXMy|#rsHB-uX)zhX9cJiI_B(sE$>YvCQCJe>a6N^LENmvNqrHp&vQw9R1e9) zzy!#SvEEpD3$p>=&mDBqN9fdP>Juw`?|-NQU5#0|E7V@F069$Oe{<&v z^0Tr~RXU1yv*8oH*u5sRHn6-6I%tg7g4j9BR`vf%R%#h%!7!Tsy{&-Ngy#T$1i1+# zTEC;>*fyt{@9Wxso;L>6{v=I|8P@cv^3lwl3%8BvNjJ!NAMcwLU({8vk1p4-GZ#Du zs|TdV8aoU>3Fr2G7I)uQ=G{)Y21_q{rYbL-k-Be(5ZNC3-RO?^)vqimlchmi=&Lt#r zHu}lp`4kIH5OE0G>c{l(lTX_$xO^%n^q!6v%FFt9uDkv;H_aoWO=gJuapPipqNg3q7 zTM2eoFpQD^asGhyz$Q$?g$ojryrNw6>}9-o>z{~~mw_qHLIFKLg<$VYR?u}_iQ6CB zJY`a&B-@@g6u8RuJy9QIy}h{d-2}39vhSf<(lGb+4bqg=P>PxC(!}V_7?W9}4qOu1 z;3Zw!wK_MWDyG&S$uW~|O>z{Xl-v4lX~On)g=0sp9Q*H)B_lYqn@^C=jJ{Ku0hc#; z(=z<^{AX;ftc&y^b`nb^3D#~+_zv+L0HDdKyy5?D!mWHD)sjrPhd_c6szl?Cx_V`1 zSEDP+w#P?&7Oo!mv-+7bEU0!O*y?e%!Nx{?+3w@=;NILsu;o&d(R*vxh}Tdz7h^nz z81l@~PW;5^T#?DRezqa_h#Ky%lB{03HG50Y=!N3Mxvb29x{&l75#GyB4W@Nvz%M8r z@3+>sZapE6BS~AkulO=HOSmhQAl%&UiF28Af8SrbgyOXMT%z0fNOr|F)WONpOLug@ z=*o|6Zh%6A@vq+j=zl;2a7P2>Ocy$MUtDx<2l z%Z+F4%Q!7`@C+ZO9)0W`c?BfjvhJNPPIn-!2&3$u(Htd?w7ovJ-~){?5`dJF@v$pg zSqWC@k(WH*t(zGr&RIhEXrY{7)q*j-+LwHxxrBW*BonD`KK=-Nx_Z8E{--a&X9tbX z@c*o8-*CqNcbu2idh>LYY)X1t7zsL=e6ev`(Lmw_Z~U}3jyRAtaaAJ>9U62%Npm_x zu>s^d@Pu?!=-7HTZ1wl!8QWe0Awu_`Q=6%=Rx?zy&bKrVS?AC?4gk0okeJ^6q8f~7 z?e$n($ehKuwub#MTXEm`JKQTzK^H0zsBE_=e?JAy8XqA+#0~cVO{Z3SxUt0RPjNr9 zb-zqzJU8mdsqloWh1gcb$D}KRykQv;=PJ6%WV*$LqDcl7B);s3w3p*~zsmIFLf2$Q zYnHpCiqfT^@jEz?7Cv8{WPi_ViX6p(i@p$Rr8+aa3)h!hazzG>w7Nll31#|29Ly9$H!QuGm;LQxvYMOMNSSlzh>0o1sdyXGC{od; zjc(D4XciRIDG&3~}?4AvYJwt}H5 z`MWUxX4Sa-ZQIz%N#*<&j1~2W?-jKgMs>DtA*@msAr{a)id}k=+|p#Iw(2wK?d{of zk1%mcZnSRidInD``lyh^(0mMzRv*gFuL6NK`f~gDs&0-{zg}qyg?;be{G8zi zR3k*!x=x5QW#sMmc%QP?q2FdvPQve3K{MdnX+)3d)stD5aW5 z@L6hlpZRb!;1W{)QTox~8Q$KzCtlrgoh_^I7CC#9%yL74n;iPHyO{vl7mr-Za3ll+ 
zJB9k!(*ahMt9jTwr}~<^U9+W3xKJ`MSZHrOg!hBwCl!HV3B+q`rqW|Ep&-fVKL3cJ z&Fg*x9tlSs4VdHIgV2#}R$P%iM6RwUYzqz5q1Hdk#^bAqXEC>qH&+Y#N#esNcd(+U zZ(nV6&w@TnM-(laq4$3^pE5vXo4`xMGgWbd{8C3FWWwU8kzSyeexz##Lic?*2N(Bs zQ2QHcpdW@HY-v%jvJJ6EzB{!jE!9sAf;BR?Zs%L=rJCR8^l;BQy@dUEj_bQ``5R}+ ztZs@X{QoXZWRi8Ds^sdkkEJnhIerea8Ko7LHoMG>5W^FK5?j|*vp`~5-+6c_;ua}& zlBH9@+)RTn>YU!qSxmmgyg&p9KZwZv^nqgfJNul2mjO#_mS^P@D(rT-*1XAlF5KM=!0|4pR;8+F_E zFyd?t3aVPJck;K!>NCH~uuA#OWXMkZ9v-VSn~A(`zN_dZ&=M%Ib6GJcW!s+O>RPm= zJd`4Lo#+plYO;#T$wIsYfB)L&?f(oR_=uG)`m|BOdVN!CQXVVr_louVX96FiL}o6z zjg%qUA6?DDgC3)HV`E3#PMgMGD_^FM~(N?3~jS9`li%JZNP&uA~uM2z+v#m_sT!c`k!23x#Rdi3R zp{+((2iP$y%{GQM9n$ADS^p1F>P;X3!4_V%8smNweIYyGchR&K2>iSEMefxxljBJ~ za|W#wu=IUZYZw}MlFmNn-unByiPO-LB6UUX1WBJej$>%v(Tgi8Vm~Yzv;=BDn2t#b zi^Y-Ynmc*#Cfm<=A=;e1g;54JEJutVN$Mx=%Ct~y^fPY})B10GQ&bo?DCij6ckKMN z((cg()an@5j^BAIiyDxHHP2iXAvHtPF*-S_)4!1W!yklf{!xcP` zbm7>0fI~x*LET>a13$+!P!tc_X&y|)*m`?7D=$XKj#Z6%A9oJjXzPDIC}~p*&stl( zTp&BoAtu+#LuuSf!zlu=t$^&_ymC9&RE!e8;a*rn_{D61>_ zEIorrr?Mj>tgnKbvwP`_C$FY6C8M*)2aLVT>J1y4-nObZYdNpH%Sc?Ut=EXiUfur| z?>Ob@Mo^0U;KdD`VsU%s<1F@>G>|VxS@s74LN^o zeNfgWh=YUR{RRPj*8XDG>PbJk`Y4YJXkkbjd*o)7Avu$hl%&;2WzqcL9Ya}yX~2VE z4h6Si9tHK01RLI*FxcVeQP*U^)LRc%_Fv%#LKqUFlu;5Ad*{=&D9>f$HI_&pW~FHx zQ$KuN98FFBuN|9NSVWz6XZF`&A+(u&R!1 zwn9*W0&XkgF5USzce)6@8U!Vc)(|Vgxc?B#$;$wWJMgLdWdHk#f$!l!pH4GsWt;p1 z=(esyMA7HPJ?KQrI78@iCnnvN_6=2ldl!?rTNU&4TT@%?NqH48y|roO}jNv97h0Z zRjveYJvr|0NLarKXRJ7%*}TzO7fFK!`R-1&qau_X#(6;|dERf0xoWa@L zSfM%=)IGMZM6x>m{A$?BNYB6XQQP}c)mv_~;#VF?I$ELZ=}S;mab&cf25a?CO3b=8^E=h z$fMJKJ<(8k=3j+E?Qd4JY8pygHWU_nuh@r@caZMoK`MT~5v38Hw3ZqXZVh=i+Umfs z;@ZAf-*RjEtEFQt>A!89hxav_w`q~iB^X*c97pbJdSYz-9JMBhVf0}b<#klG|8x9+ z2wE!In6cZ3z!KeGKJh(ei>^3oH`Ow|?N_Mtbej8jZOgvr_>Cab$&CHsidp`!wRUG+ zT-)9OwEbSXq%}y!=H-`y75AeqA?Bq<*>2z|ff`&rj25aWpBayXsWMRDwsyS}9~ z@+5vCJVT(tvjN=f>TWJ@+HcMMd&>m(DyrSpNovpowbjdM6{uzpi*@EbYWZMx?0zd` z*TGAF3@Lg0f?tFjk^iQAGDl2G<9cYCCmh9GHDg;veeaQ4>Ysvg4&|Vjkwll z6neiVtaYy`DAd^(?Tm`MT$gDN|Mu4{#yUR+Pk!+4xnc^B_#0?dyZYLRLvv#XfkrsB 
z`$}9KKFhzv;WM6L@1OP%24;@R@BqCjF10YXTDZ5|Xk}ZGEScvYHRJ~wd7M1BNuw8b zm2qf*R$v_`NNtAog;5N6m#pjck{ldYqyx=}-x3@y8!B90!ef@o)Q3H0-MC?OT|psv zWYpJgRJxQWKz4K~YQF;(D0$SAkdrtn^1%Mj;jHDhJA13irK(JG>1e~kpd|ix9pin|!WqWxfGKZ( zNhr_4BXe*>0||5fF?-u1oQA+ZQmLNU(qv8%mjsvoBm*cBo?ls;AW&P6SlY)a? z&BYh(M{9z2X3;)a1>3|W#;CN<aZ9#)~Fpzzf6}72k=Pw_0)a=AQKJ!2j=%*?p)l5f9#* z4@mH+o3tq`tWD1=2Ifqz&cc;6x5vB|e%=q2j#uAxyP&?E#xxJi4d`kWzYHKnVR$Ya zELTy5H~OY?Oe2yc^e4n38zk=XlnT+!FRChk>b!)fr+4NadsX%6)$1V-Dg~~2QFC%F z7s_DKEIX>B`&gkh3}(6&3de=zs4t}(rYg9Z8aC=9NTyOll-!D7B0X;F!;=Gx-X`I{ zA+&c?t6-dJdSX2Tuh8lYqR{(ru#nKukCz@y(i$#5RawM@J(C77c|#d3BGTi35JUf* z&oK+*N%za8V9h(V(*bHgSuSf0liQ9fpLJ$2zZ|c=AxXo_DskQT{u1cezanPhXep6A zUb&_PQk1CDm-&Yj{?F@tn-^}?2^h2YcjlF4`r0-TO7H3on7m~Wy48!`D7E!CYnR9Q z5BYJ~N!j4;gN@Zdkt>w-ZbO@kqqbom|9MvIs=LUQC{_TnZkMZVO0CSoxH(cH@~c+b zxj(-*4d-NST0%mex_M0Pa2_H_0TO=UdvpX-c*ohJGw~t^*6dQp|M3OFx#z$1A`{B} zNhKlx6#k*SQ>;NN0{Av|r%oPeUkki(-S#yAb_0myy<)3+1uRRu;u(#dgu|ZzhNYTQ zihV`-2vdyYo^?~m7T57K+5f&Weo(g}LE5smFJvE`S!Xi&-~V_d>PBA?k!1=mum?tt zxRmbRWT&(X{TVefd#uIWRsqGM%z-&s?C9f~|BS4hnDn3mZz3ffA2!8tw2x)>&#Jyg zbG1bXDD?~Su`fOP@1HtNdKuPpGNoi3p2Kk3#eWFq&Ahcr=h$MJ?mfxT^wgkpoh3&`w`3+Nex(e~IRIR7) zarv-9`R6m2nGJ!i?kzCtZ~uAG!!M>1|J8w{iA>WjY008jyQ7Uo1p2SF`}a>){-dT( z7y%^>6Y2l*?J}8l|A%wkv>H&~4{*zeQr<|NNE7LQhO&USkreu`9(X0VBcmf`@3Wi+ zV2XqGmRdbHrQ&SWn;v$?(yZ0{$WNCJiG6?&;qsw6!p&*)7)?nQp-rD7;Sc@$Rd|0m zrvBu(z*N{yer5m@wSIN~6S9BpxdUjIiK*&KuZ$c1 zR#<3$6R|nWw3#LRq3@_Q^~csI?iy40&z)>eHhfB0i*cngl3tYg6o5_cXx}ra)gbVu zoemWPmSCpxXL)u893gl~M3@@Qnj9j=mnUqoGx3({BSy+pZ*v_=OnKe8 zRZ#AkNmk&6E0dqFCDjSjx;8$?hgz*R&D$RCf+Slj*Aus9;=V{Sz)P+k+{QI>vQzeW zTfZt%YmtpIDSN28TdiM5egVmi(Nx*}zlj?k;~5Ei%fvH1BSGCqFsX@gw~Rt-XYZ)& z)}QHyMm!&!^$JOD+6KwK7aA?QfdlT380dbaS9fX!EpiHIEPW`F!=Pi{e>|Ms0F@rc=mD7Z!ko$#GnQPFu_MD$V6j)3hIIyh-EAZ6>KUj35ckBgWr5Y^ImN#FC7S=azvp9MHq213rIfS>4=`5(Y3O3-IR?u!%Yty@PZbPL{f6O`s zxD!0NN*FqhKh7RzdUc%ys|<61C<$jL+;RJT)lDt{X^Cytu=6;V=0h(e-@f{+f?m4K zfHa^(j3hgeA1G-ETvu}D#z>9mEU9eTR|K$bN@t&&JhgNETlN 
zgeq`4YBjm(SbD1v=Wk2p7ixw}5kt<84EM#mJRsN2zHU_GI4eaB*>BZggxxp`xiw+# zbiQ3gs~O)(mE8hBwlj#fu|QVN+l$6{QShc{$a=>$AsWoZwv)ATEIjwhhhAk$rZC}U zO=wLnu4%n%NqF8OxXp(L)dw!<}{K^ zhK2WiSj0Pwa3q{&c0?%bYwR&{R&?q-@M$Qba8PwGu%%+w%B=JnQ8((pI@`=deaKmR zcjsJxlw*hLux|F;%WJjr`~h%s`uZp8@1DEjrI_{j*#9cgYbq^e-Y?>Y2U$jW!KFdXf^aitzqqbCx-!t z;0XXDI2Q=qsjB6Ke-@dkjccup00=yfsj~-Mr(4s)2VS!%=}W-o24uasCJy@ayaoH9 zl+OfoNj$3@n`Sv*vUmB4^{7gP&#uRptmtH()x)-un+ov4GIz2uhk3Hw-ixVczA+@S z31}Xo7VfxJA@ALvnJIa4DEe1Wz5{|%M3-?WH|6<`=BqfGHqGiCHro(9^rWb>E3PtRSA4})ZJ5`5h1Bp}_~KTg z8JmOZsLDBFC(||3`GAtqvo0yQrWn24-^X}A6W~1X=_{jM5QZR$coO8Y$4dBB*Zi~v*tRW?e$LC6d5s9{_(`h)A2zFwly zJKK2hXOGh>xD??EW|VoM35(46ex`A#dME#{ZWmc~8Sm2#FsJbP-VZBGdr6a^>E315 zyoRd>cebTMq%|JRvm^`p^Eb*w9?SGt+AG1Jldy&dMIYYG0~B^Z(TQwtB{rxqwb@S= zhMbpfP$!xSpB&!K`32lYpRY6s1$F3P6{sod$tUC6R;`G zC1e5yU?j%@GDuq&pL7;^qX+1t8T$$T&RZ}#unTtUJ8hZGz0~B--PT)Ad&3U}g zY1I}+@z-|I6O-*ELBRK2CUOvo3Z`gUFmy&M{{qXDc%-vAB{&QbEKIrokvv0gc`1~u zUSBUb?Ky|pNcQ*4AKH0Ht+2z4D;zJ`JcG7!+N2^$2Q|4|#2N2GZ*v^@ZSnDf6Cldg z()-OSz)&&Z(QkX;{P(+S32D&N*f65_Cj*iV{1TPG^;U0aUv|867Lqz3@C~SEQGTRf zqDO#c7f5NSP4-qsuffQm*^f#co!C&5{o;J5&7&|vHI3$(eVdYqz-&sG`0%dF%`}M7 zTD%H$QRd`D@yBJS8utl#XZ`!=#_+1qW>=v38t|VP@t+uZ6ID3Gm)M=GP>oo7ARz%< ziX~n+7L&6Ynycu@CmeowsOc*KGXM$S|sL7FT3!YQr7yN}co;t7q2N*Y~;Nr@R^I2aY((dcdm%1SqZoE8soaf))t}*Y? zBN(K8z)971ZdPM+xgr?YWb<#p3juxs4WbFxcIlG zU_?x3Wh{e4F4T6pgsKDqr25y-D?XS!o0V}N6J)Wz*Fv;YTZK~rc|PX_{eF$MI?;oz zrye_{oFBJkHxqaLrA})9409G_v>gSvqsrm5A^I&2NQt8KPb3dNG3(gb=PZb5_f-D&d&n<)N-JNf%is+&(Zn4 zeF5iXS2JzE{P{5Oj&mI2aJ){BPIfT_5%vUf%f7pf_8Xd^SCHrMJ8gkdeEK70@KT?2 zQOMO{SzR0H_3bglCWFF5|6ljl%Yataqoze;B!)>iIgY;dYi)+^1H9 zg6W(MRWJjJ11{P7%%GM~yR81xZ*ws$Cb3Y6c}-~HM2)_p{OUq=!(a$c+hMrbY! 
z|6G=lV7Tvt3Z;&f5O2f4iRbtX_InlTvg<2Ng=?5dLAHJv*(abW|G=jKru{G5atV`j z;`~ma7YiT?(mD|RQk)sho5KSIH?!V6f2i+rVN4?KfJMwrDt{rzm|t*BBNk*uo9Yg= zC0VWZoKikpdm9VkkF#u%^EY-=ekLd-B>9Ej<0HO4-+r7?t!SN%gWKUATWUo77kpoV zFkbJ+k?g>8WwNm5uDwnm-}BcHMdL05+P4|-^A}Bgnz!e_&&Ak79s@-=EMGyFzcFMI zf9FD}8w*HJL}qayYcXy}A0`3Ab~KfRLQXqa*{^g)!^PJuUwj1S z83ez0AWDhlmx;vU4vN^rjLXqJ9;Tu&i5>?^t}uCfFJsg`??56CsgQ^J#!3`eKOaP| zLQi2AN&z1Og*Dlm^@wG+)PK)g1^C#{BimP(GriJ|@(erYh28YPBbM;n;_n{V)IGQ~ z;Nid3S9bmT%xg*9ujgZ)7pQmLyWn)eZ(JPRDqHXNS)Skxa~&elnTPx^N0d>2dL;QM|`eAd=GYzq91zT8LOw8&`F~bW{Ot-YXn%ZJiYK4ZkP3ISdiAF(ouN$e`NsQ(DtUp> zI3os>C1qL6^|j6iy13)^w!<_1^Y@W|ra4da^&;rHqKe{_4z{Dp1yfv7or}Y)rF3Oz z5{4V7_%5}EHYaho0-$H7?|Xbz}YR zT-Yn-sql(CpD)|f~IyzP~aEF~=f{on=6|tLKqhv*kvFDlZhOk<-fMO~9v- zW(KUZd~a=f1G;6MEJh^r>Va!g)9F#Q;b5_JbSD&^a~p_CeAAbBPNx(1x*)M4}h^rYtPb@sUwBW7X8v7DcnqfSwYk zXk1Z#!BAMh0!`#s!{vGMDCah+zHipQT-*>-q@Jf}%}ckS7Q`nQtD2kpWW6bpt<){I z1V)p!dh}=focTzbx%KPM9~-8IT)ptxF!-jxb5-qZMOtL~sU1oo^t&>t8)oGnSqXfX6fgesep^JY@m>$3avp3?G!{LnohU4Sax3`=BRTPwF-&G1u0)Ub3+G44SDbE`V-0nA|5?(K_01!P$wrsSPqt5JWpLM^QXpZ)dwa2-AS ze6=*w1C_S_9-PJ$!`SyIO5y^erB&O8F(pkS&>AFZnYqIttaNX@I}Fq$!@ghB7Rtq* zg0Gp&^iF`wVh^ui?zU-sBkYeFH&!nwlqKW6>=xiO+Y)Qe=2@A;Vs`e@b97$7yf#Xj zj?GZ(U7*8O!P>A_Y$(&%>`j{fSk!k`#D^s*nhYVBXKoYQs~b=(`0E}ypF2@;MPwJE zgv|Sb+7GggO)d_n47s8e%?%3<=AVW)yKM+r{n+s&ce?aDQUSuaVX+Hl)Dx{0vkn(Y z?eZA>c40uxswMYgr3SX>EKR_Qo+cVs9pG7nZ%K_zL)l3|+fP!TO!iG47bO?!V+JDK zrsZr|($@)ZgNVN6e-`%(#*qmtb27tim)|_gNZvPGW-K8>@w9$6oRX)%q$pUvB8G6K zoGPSkGdX}_J7T@3drqxTtZD92d%2}}ydrQkL1~!~e@pLX6lt4tQ-<5GYodPz0-Wu)?WcnrDxdhDB7}&z z6cu*#?I8S<`b1aXVvm8t^wTswHrhY4FPc8r9vy~7cUQFtw#Qs1=GJ>=_ ze6v53y;DwBf;a45@@~-DW}h@(W}3QfW%YQsr82e@a2G$V%AK8(lH%VIedZ(?ARw>( zO^g(|uL3zs6cLQqw3}JoxbS^d;zViphT%!rt;ugx{Mtk}QTw$MR|r*D-CCIrW>4%w z4gf2evZPjP7q2V*Xf6q~<~_Yz8IbWjzr<^ar&Dc5CVaFjo6I5!=Thx&!q+{^Zn^C2 zUrY?+i#(=O75oI=()rZ8(=ZC3&z;X^Vl@DKVBS-smu6Ag-E+E_zUR`Gc!+oFQ9Cj& z5srSFF6!Xbuugdc%&4hLDlv657Rz4z%>}=2^`&?G%Ft%IeY6!$5eo6QhlGxFr`o0P 
zDn?vk!e`|>;Z-q=YR9|10j>T*bc4~w?hnM6)O5|Ya1~DkklYhNi0;g8_m`o4|G+a! zo=SX@{t#q+DY2RXRoQ9i2t^s^)N(hT= z@ktljMnD~D1jQp)YVFglrWC6&P|2njMITblvYZU>Ob!Lho-Z0oL*(3p${QK|gg>;$ z!9F~+mt{^?kY0J%;hNaUD4qr~y!+oel35Nl{*o_pS!Uc^eoT|Wmxy@>d;Wmq zy$Ps#z=l0o*vk{h*>>NJm>s;D=CSrY>^`(rYHuEhT;qU(FF^lWeq~nRdTz4{$7{vf zZk>+OW2s6i*BVbJPFgOIxFgL!co@`hlRPL$Mjd=HFa3GINLtBoiD9?)vJ{&8Wp105 zw<*d!ZL~%k(#EY#fZ5KSY~=Ebn9AWwT*fa9tex#udZ>e>fcTRkn(dp^U~2sF1AOi? zkCbYNv*tyE?O*SVO|7UJXcIlE8mrifD-PRV9>)d-$_Lf{&AI%qB+-!s{RB#ENuYrH zY12-pgrao|_v0()1GOhakL{p8p8L-C3QPLCDW6OJz(UO?07zN#*!I=!Iv|(FjSpBY zyWUqSXxg|#j}g@Z6AMN~xU&qtit~fO1H$(3tY_=uHGwqJrdt-Wjh@HQr_cyct8dXQ z{Z4UV`=nqSaZ0_g3oj~b^}P6E+X~tCQx$_P-bNfg6F*>?epJ7hSVotiO41wh6jY1I z8iw$?#}!i^jGJZh!t&d~@p^;qpch5m9y1l*@6FpPvOp3D@%Q{StB<&-uM5ynvRLrU zFhBkWz)J@QYIr0hYB!11kxl8@9X!1N4I_13)SZ{)B#{myIPd+lnfsQPXRZsR-E5*) zk0nY&{aH*5?K1NrBS6h*T z2%)(R%wf$)>LCB;`60z>8Uyj?jqE)m*)0~TQy$>(tCl62L+sTc&QWUEHfMt;^a6DG z(Cw80EgL@0_NWk~4K$q3g~pUc4IFIw)mcNbrB1gu{Ygh8luM(oAtD!?2ms?^ zQ-Y&;UguZs?1tGkQA@XVTm9oS1K&xk95?IF&m`HkniXJ*VqBAz-KQj{K91%Mq~v?l z32@xTBD=*17V~vu^3|x2{I8B5ZE=ud9jVv3Md2%&79?7<|l z9#ZAfZ1p@`zLvWldqwx$Zx(utanGv=Spw|Uv|{@1SCUDD#Bl>-uAR5WD82MW zxhzo^;|lx#oz5fT>9{UMLyN7(3JpiUHK?`ALvn;JFj$7R=x%FXq?Epj@?ATMx$QYG z?@ybZZ0jy{#fEn|c`hH$5}v~*!~l7uU76t6xU<-bg%#dfnAFKba(2{;!DpmtO?26# zJ{6MZ3q)&O)Xo{pq>4%~ZTEG(_b=N837H5)I0DVpNMJkPVc-rz6x(@HA8|41xnh>$T@V`qR43r`KH6gQUDsDL> z`UpP#kj@*^y278!?xU_$B(J9IRTo5LSI6&w6)kxwupBZghSPr4t5tL7AS<4W14Abe z&To)Y!AqywSvub+Z;YnVzFXqtA&dHIcsp@Fg!X~B9Et{n2>Oz2Ct+bK+p{h%;`py% zAR(`1Mwo~k$qiyCVFbR!@-t{e3_=mf>w#7=vvubi;`j~cShp@Uv3@oXJ3YN(7&@$N z)p)J*a@Eu;qs2CDs;tEsGvrVvEgQJ9(b0fkYQVsM-L=~uuE8R^t}3{ABRqCimr)JV zDTvfphdb?DBWlC{+A}`6;1+oj64h?Y*wZI}(I`<`w%)&VIqaE61^benp&+j@tp*|g zd3ZZo#BX%ssg|%HL}i3=NzHf4qN(60<4SovY1M{kWv{N|l=LfA?x0tSAtFAssU)Tb zHYM@LYDMwJ=xJdK7Nv-z#;z7jjEh)tK~B|8)t}5x4HavV1w7ST|4Im!Z!LU{{o8Q@ zX#Y!1>^Sk}rmlGri@a*K2+pumw-QvXfmvGG=lWwc!RL7sn+G+1q{Sfwt&WVepzD=l zbo;cXX)u1t>i~V~ajGjw!z3M~7kW0b8%e4rit`AT>Aa$IYhZEL^@kwrSkkvKUwhnO 
zazIzvUWEyqPO)3$Y)K^;afR#Xi^}i4$>Ows{OjfwB8mEM7YM}~x^18?vY-&ZYpZRJ zD!jg6aUd9msn1_Yyj)}h!JxU#@|9j0bID^r=ta#c7}XC{^Kx{IOg3NL>U7gyowKi= zsw(RhKRv~iC6PYNL)qKkS%nyi_90WgrxaXgL0*fXIytN_`mPdxD2YbA6bp0hg2aDk zc(sE_%AROm(YVOzp0C--40L*xY|;V!2R8HfDGT||W(uC+C1R-n_2LDvT0atB7CzV+ z$TaZWT7bunvn+txe0jR&?UO*v%scI_ScY%qn3bi|V+OwM_Q2oj6fabb2`g`0FG^#< z^xM`1mn8?z{hC!DyG%AdJ(41b*g&F!`lq{M@g&sVW#|A$*Tr@|A9Zp-q7x<+vH1YMSHYZ`DJN@o59l$4Q}il z4~IK{K6Nc?qm4BZCVj|p_OK{CQa&%>lQl-~w%R0ci2Fmd?>pT6X<5;F-rsM3TpAf{ zTNT>aD`&0L@Qrs(Yg3k{`$rYXbN&BX2M|ANu`9sa{2F$9I;88Zz8Fe{!$bFDq(UnL z_qMyH8m9r2rDD~2x!*lY_z4-rOPKI$h?9F{OxJuw7`;@E$Y55o=Q68Vt(tsh%5*;{ zSJ6v2lc(0)6|p3_!C|iGgo2NML)TtbQ*Lo(lwc$e<$cYIX3ueav!WI!T-D)Wv%;H& zkX@!Tr}{ygSahS$>2>fFGa0cq`JbhVD-Ns-1>#)3FQ!+{X=wwQ$*JD}x2=v~bR6Fs z(>se?2hi@^46nhH;{5L@WFcI-@2l!HQjfba3-z|6A!3^&vZ;?laapOiagw!O3I zkxJALBrL(N@PC9BA(X=WbL%9}{W!S_je+ zCsV4;Y}5r!A9b2l{|IJgM}VMi9KezDhWp{F1#D-ttr zQD^D&HVmw$@4vbwxLezrfp{hf^Phn%6kYCXU*C~RnH!NVuA=5@E17n>>8o~$+=-Xh z$M>g&fIy~Uj~G)+m7AoIvqr|2sb@TOpOmqQ|BuWcW9?KyHn3WYmyb-fb+7MKy)`Sp z$|1C~Xn~}+N981uWTC@h$6z085F`FiKIJ^DB(_c&KXZv?CTTu60R*-S=*p4B zD8j@(t7w*%f(R`=EyT8za~oT=`t#nC5c89J{niAt7Yu_4>D_kY4S`-7(Y~N|f5&1# z)77lo{qBAm3QdTJh_DGXfj~yMo*nyBA;sn_5)m3|^8& z>y!?qmuGiXG#tdNKJFR{m1@p!b>ObR_o@o^B>78gB%)MN`URc#!RhTVvF`09k25#n z$En@OeXA6Q)`$-(Jy-ZAADTz{BZ7+>3FrMn>IJr%1e6{ygKrJf$$YR z{ZArqB$Pxg|0PQR;K6JGutS!wWxVOkJcb7d=Y+#nPXc-BZkl>!j)_EL)D-`Xe4i-|}8}{38oIbPUf16Ns z>zf8?TOUczW!(4&W`mvzp$w-bH(20D?u%m+&<1KJJ$j>{fnULmHO%!lHRzAvCMyl| z7!SNhjAbw6`tYHIbg!^dF_qBK7yX=x*mb)w017*MSwsI$zxSx)mT7=5^Wmy7w#)CA z|N5!x6xPw6Kd^7D?kAC-v$IIV!AJxy(;6%SFtLvR(0Tw#79v6;=NOa%8&9<;&A4 z02V3yU&e&mO~4S;6s$mK)N8pPEVykH8z8%0EN&#BX1Kq6_2xFqf%}2ir zxJZA_U?261wXpJlr?lG9os$Y3R}9J4@ucU-YLx&wFv@mfV-lx^zoDFlNFN$TPxV?M&O(bd&UeD>Tb|vD9 zWQzHMFM{=*ldL2MiyC3Qm*sauep@E^Ldu$HW?v5<){+PQfh1^N@WO`}0K2xeSKX)} zKea|n}z&8Gd znq2v@`>=^5Xp_1~NAR$x6A4u#qI~~#CsjP81JqNkdW7X~luR8Z@g^GInaHCQ#C2Ry zk|u?`1nI`%_*3l#en*`P({A^TwK@}ir#-Q1aHAY-Jj&fN@uyYxG~R9$&BwY3fb|!? 
zUG>&;bC)}Hkfp#K$(nTY`ayf%U!Hm{bKg4lzEnM7IB;VB#nJnsR=8rxbKAI2N) zTrC;<@G|xSCA5(#0|k+DJ) zUtF`L=2%ehbcHh1>IC-Zq-HMb&zf%-_C=9qAG5xqHo0bT<7?x$v$@?Do9j za=fK5M8vYDbdJ)v+s#K5a_ut<&BLb-7fI%%mW&t8Ubp1r zszNpk(ZzDFZx@qq8xcW$-~I{INF6@BDIF578{4mriMvuS0U>c{mx4l**k~e#{J!3P zec_~W}zu}aCdieGx!^G177`Vw+-Lppv8Bto)IUZ)CW>(qPK-<|BCvdMmZ za2=aGQv-n3;y>=11&9J~>BVnAoe3>w9RHd^W**i6d;|pNUnhkd0Ig>+7vwpgeM3~_ zXeM>3&EJaePky|(Kq^!AHr45^XRo?tToL7VyWS63ESYY0r^rEKqBMuPD6&>BOStF{ zOqvM~L>Ei0$yp{kO9E+apNF$0Ohednqz9`8dfY$mG|H}S-RO`s>DiPj&zTB763s{i z;~4mMOtg39O!4Bc6jaKg2WjK6s*!$RsIRGaJSBhs3ye|E zJ?h~&3tzk5#97@_pnFY1IkethZsMKCJJOu_0EZ-;BY;Xw{S5dRX7{`{TEhQ;Ol#Gv zMy(Q!wO0RTl!M}fhyjoAXDBnpj@0M*G>7*q?IHVVz|eqsUiVT%)2Ciq;g>KHD9V`H zzP<0>sl3j)i3}}rY(j2_#~(UvI_zVXdVVf8f2A#&pYz2qLA4p5BqogWzHiu=4m(OA z&6EGeKo}atgZrx|o(_JIT(KLtu;B;Sn15k$vF!`SWxFFxW3Itcy~VFM_^U~@`wT>~ zB#I1Nc@=}Mk)(#(!|$aWKcV37tuh(eJ2E6CN%-ncv37q!{4(i1yyrvUVG@%QW`~rs zt(q;)s8;br6Q94NSdtQLV#zX}D!TS$I;;%bs#U52dnzduh5>^}bImd%oV^!O?-CWA zOy<|?ZG>SL$1j;=zN6|sbW3AdvV-2);pny>6VfXN8O?&-8-{GaMhb#4!l>UA^P4_Y zObAUY#DlSu5jZDEM9*%l`Kw@Ch()AQuH7f23!NkxItuC>RZ)>cW8VrW^RZ4Hi(QB} zi_d#&?y-hx+A?eN=98-YIuxQwfYJ2NviuU;=v7(h!$l zC9Al;_>N-aeuyE{5hy)0DZazg%o40GwZhaE!MkyvCod5++8$&m^H>Yh7sdq%QcG7# zPSx?2ckknU{NM}Xczh-qWip2Gk%}>f5{B1bG&)9UgK&6w{S&m08T`mY9mtR+_UTdg zY}Vd(W4(G^H1b7>R=<)_@WwQ zMk}1Jd)dP2?$63w#u-MtPZ!4jCX$*}fQ}Li&uOy` z*{QnUmsXh2dKAngm9N^&SsOpE=z)ma?Fq6MM3c12(N@<`quhfA z!72=?iq2~-*9|^Bx1FL3saFc#FH3p6Qgp2?%S55A(HD@n(skIDHZp0)+pDGc44Zxq zC*eHP>l&xgyyj?%;YTmm8Q$;=7FO*lJ=HgC7kcIjGR1012?`2!ii^}QJXQV)^EIx-mBLr& zj&DE1y7Sld8H6hsyYOrOC^kdY?Z1+(XU$ce?etoG@ndi~E=v}{fMHrq+F@~XS2j8v zVBAk~nBrG<9z|vqIMA=!Cu#j9B0YE^{y**ypMU1*N_&}Acftpp)31X#n9t%0i2d&^ z2t6l8Pa}*T!;s%y?px@oNl)l{_iSl$M>g^}`i2tpfs<|)a=~m;Du6>#RL-_GV3qx; zR%I!iqzIVStibeR^yb$>rq_noupqT22P|`GgPUC`h%87JO7O3pVkks zTJe+opX*Rx1xNYUKW+940Qo~|(i|W_P!|Vsc|kc2+j#!1_+psE*0GacFygQN3W|>; z;xAE4P72UBkBs8_!Voa)w4tXsuK(VERq)HNvBRi1Y(srv7gkDkaTE%v%eCAt zj4^$6uX{MP*SkI4_qcTYspp0pdIRBRFKs(r-{W`g?kLgbp9KO2DF!WG(@e4di&`fo 
z(VtMP5z>9Y_dgW9k|98~Fd5j!%ltl0n&)lPFYPT7XNjWqUZnCE=CD3$>H1*_J&sF` z=jukUZZ2%(fL984A(+0aujHmh)LyrR?R72Ix{9vUAL|c?pFZiu*+(JLq4JQ6rZO4?EoPhQsyZI~(lJFl)LU&o_2uF)UU;nxoI@KMmRV46RBSCMnrT{h1HT}j zx%@h^q4EjN&W9#o!U3Sv{rdOL(QD}CjHUZW1g9y1|FSYmJ`#27kFaxeV!s|M3xG17 zq;fL;?I6DDXgzto8daYsr_pqlKR;ZA1aX3sjqDae#S;(H1P=0=u*Yt95(xeY$0-3g zcuuwG+)@o;$?T7PjOaeo^o8rBBtIzs^+1h@5A^WVT7ZeTZxTK5-!$3)1j_bHm4z()L@X^9fJ7r5)b=((PU-`?ym>Wfa*%^sI^SQ>|vlf3?w3TY5}4nA|mv^aYD{1=MI*dz(m_NT|m&6PezIz3HMGPzsz-0W6Wp>dXO-sBBVmVi( zeqkFu=2;i|cV*mG`jLC1Hp%INj|6KizJg-oCU`VBY4~FVhgi+ryh(N=yF0gsXg^jd z6&#*Cfha^BFNsWMfdKn(?caj52v9Hcf$Rct5xvqye{{uM{;xaWfCx(pUxXOjv)5k3C6-mb2Q7%9fbDu4}@P4!UNOFy)~*6 zD(ybKddv7xo|ST2O#sxY6I>2WQz&;mK@|#&*V8`=cIn&s=0PFT+ZjXh%|R;tZ7k+= zG!1%9O>DLk-`Ee~R_d$JX+%h%@En)1t&STQ>sOryv z30Ph1l7E_qK3~{)7tkwYv_$sR(Quxv?;Z?e*efNL_}DlD#yIGiP&mw0;TN5@TRPu&-v@y673p>)@UB#V&yK8$$KeczsStgN35>U*pbhWLFU4ETe#G zar=37gwBLFzf$~H@{hIJ-zxSdA#XIp1&002TECfuUNY{yX6(W~jqe$Vw{JdqD7L3PYCOVK>Dn9M7X2`7zZqHb z8fM}+$vD8E#WgTUF^D-geADWsQTaB^N?#|3-cn3S$9*t))h*|FUQ?>TuSJ-Py>DYd z#22cOiDT^v3=3c)qW-0%{~l~m4!{l*dQ#AUH0A}rj;B7GZ_cFMS&VSp?n`A9#e5zbMhH{IT2EBxG(gl0D@H zN)>N!|CPX{!vhjsqKebjtznpq_s%V?ZVlTr8dT8ZiJ82{v_cc6&g{(Bw1@xk1*CIi z5!{~Y0%+{3|571;ztD^aAcsZi40mBV$}2>k|CaBdbBxW4={jM7_gfh!VXxlHyh*v{=Zl5}e>MRwgEZ(`pyw?=MQw zNuAm9!i)ce|IUS>6?Gg_Sox7xmj)~i1hk?d;bwD3;H(9TfQc;YWj4aM<34AEhy_d= z1w@#yg8!fQX-~XrUbfyR;&hTaYF)@nK=W&sMg2zU-}f!ZorXDup(5N z9}y}^B8m`$(apR#a~&Ijh--yf?bB|OYT<*1=2h1^M~kb_W_2bOH%zxVv895Kmc~dW zDi`BpJ{+@X!8wk3c(8Uy4pkZj*en?Jr8&csLz- zg`x^oj@~QUe0Sw$mE8lxp_oHh`sM2#hkW#jjLf$_qs z{%3!i41Egue@>MTI8~RSbgR_Q32=4h03;MH#X{J1Z2&>_$n7as+!Du8YgOPvy^G_1 z!n({xLn-8`mfvR#VR4G~(>L#jj#y~+5rUKAlDyqt91NwNdS!;s!f2;+6n(+J2)`>I z%tcTa(+GD6nJgPF3z@=O?8W<-yX!BH)aLSLy-U*T@HXh$A15*0lr`4)u(i;Olu%G~ znab{GP~6!&NvD-T$4EIZ(fF|jM9nJWDvYYSEqph{@q;U|0-Z2`$FVkRRzJ!Ec2}c+OX~8LfuT+oeq@Ai=Q5#xWWJnLzOO7%on7XG8QthPfm- z8dZyZ60a*+^2mk-z4fSsPa!}YEF52xzqH5oqpJ_ zdT~~G+V`*VlG6iGO2z+_>g?S8DE}0bv7y@4z0h>UacMxQM%*=Sz 
z@yjU1xFe=rkNW3J(nVxhUwCyA?q`4emdjs58!OHT+U37zyCSu-ptCWwFZACWK5-Wy zRv`2)w5Y6Pq1@ql%$P_J@mFS5JRMa9@#O_uqASwuV%RL-PVX4**{-7w_<*M0k>vU0 zCc;zPQUkdlK7?knqedEHBgZxxMcFm({E}Aei2Hl~P?$+{`S=51*2AOZxo91kCidI$ zZvORBwYPT`M{Zg)tKZ{K=Vg7Z=%&0Sc^mwpgjKGm z+ov-)`sLTl0r=B-yVTApv~omM=VrjbVOwAK?Cw~^yp4+MhDfi)c4zZdFfSv5QOC&M z2D1KdtPRjoMvo*4GSxWxzNncA0BT*#^P?J++C+bW(fEid)b*xT{mAWYQagr~`@|RC zJkzzSLqi-ZgLA>Z6Zo4x1CyX0?4NGrVZi360Ic$7M!#Gs%O1E;Zr_1iYTq<Fuc{QAY#n|{~*qH5-o3Lmejw$qP@*BwfSe$87f=BZ*xD4{32@%|iLxD!RGqB%WT zAuw);R|Ws=QSPyMH>N3ZTdLQBVA)Bs~+FP(c~LU$iMbUSno z0Ong!B|n!`uZekj^UTMfZtwXb%jQkq;$vS8ucz*r_was8pKf@are87SM*RLF*p`p@ zXr6-8>(Z9c_%9?95Q^p+VsQHa$`=)X*@3IBIWNMgw!)ZZZ}F_XNUzODPN15+aWB*H zGraE@W=$tVW!@AiaP|eZA1m?WXjmiRezCtcTrHEA^a!Cnd(O{!u#wDduSeBcyJsh% zCkM=BGx~Fdg7Yp|R`2r)GFI*v?rDF$yzI`|++(-+rri3ue0{S@(?s&bBG4e!K{S++ zk3lQ`zWbF<*iS)7F#{udS?0Na(UZ`Xm5EZC8T-i#;kCGpO13|_sZO8v(>7vwS#G@j z5X@?HW!W~|B~AEA0a@mBj*ArJnlJ?|rmV)nWDTK?9)Ax(Aap>ot#DfnK|w zf=z@olg?|uwu+>m3atC`0rP^Z5=EPg!vE{}o4F7V=BGQv1=)SnZWceBUBj@U%{ZkS z-`FP~Pn8uqGp5#}$X|F4Ohqk5kl|m${Ckeq&kF#;&SLpA)Vc346KR5T-T-6Ls-<~Z zS{;bf8h|S(BS(^x)5 zTQC5%%auYnlia)Q2H!PgK*HP9CikbiqCCWwnZ{oKbS69{`iNRIei+PUxSmjz>K?gZ zu0HHjMr2DH_*?vPojbnL4@JU^xV9wp|Izi<0Zo44-!LF5iUNvs2?jD+x)DT_Mi?_nm*hWIOkLu5-@S_vd3|q3Z*% zg8h2NJ==8J0MPyb1TdojI7B^jku$(l{aGwT${WlXisrSOI!^bQt0)J+iu+VB=~plX z6HKAoGQw)x>;UEzjy^_waZNR`v;|5e=(oO+`*Q;3{&Xb^jd6vQY(tY;Be*Kr7`clF zeb=9xT4-t)9xUt4>PGbac+iSz#kR(O?1(C=YjG<*EVU;A_m0|vgMU5Y!2acc1bF0~ zXtS6cBNpyuTgRp7+C4}WiJ>kI7s5t!b6r(-{=ged(l(;_zclJGt}O6m*s~yvfYP{- zT3i#K|J8QGjjU{`&UKfZr>UC8Qk)fR1^~~Z#*Fo556ia5#S`5|uUMb?4(ma$QQysW z5LD;-{&VDMv``fUN43rD^kcr5%D0&cryjg^>a>Deow@R^So7VO9P9Y#I69s2oLp1G zvifPsvi#vE10y!-b*P;^XZMCD!~F=(&vKRD8A}IC^7ma8=XL*DaoUuw-{TV7e%OR* zm{y+S?ix)RE?9CBz%WDWJcUV_Kdir?XO_RZFTuIAl2B^%8U{eA#9{r8HO2v&kKvi=lc2}mDJse+ys#0==tF7qC<_6KA~Z%tW??ZMHp- z@p^|>>qE>kGK!%x8+pC;0?^B7Q9Y~eGPhh%9<5CAVR`ztvxHZ%%r~Sk-=8!h&X2*p z76B|y>hdX)@oYTEE&BD5S`iP6f}OSd01_zZY73O%@z6U&9{~oSpQi{*G>-t3vj^#DAce3$xmuvfR 
zYFz$t;(iu9Z2s0qFkbT)x3l`F7$mOYX1VEo=E?>#xnaQ!`zp=uWCZ54U%h3)Ig7dK z{fpSJB==yc(3va7(Zr8)*&WT>WV`qu(kTEH+$#%AGj-=L)drJNY`@Rv~XJo@YMfkHrtIFEl&qM&h zGxHf15;`dbP@pj&YtGE-OAW(pi_{H4F`t zbJ*3FXW&8u1Z5Ea4rMUrXDyWvmoHOGF*B4Jv)H*yg&lo2(w6uJuUST9e)>H4D}a?; z37h|BzI_n^n7fpdU%*2Lub2~tR&?4KRQ{FC-zb2<9s>9pfV9CK{(=ee(DYQ5*K6x8 zysIlV^?yydRTF9E2bhdK4!Sz(f9&hDfVFK_BW6bKI0H1s1rrQRyYj`Vi#Ji8Mp?F& zbnpog?T1ZDKMVZy8{fu8G+bjCDTF*?5??xoN7~(kk2kIcH*wc*oP9gGrmSaKZ}Z~< z3$hx$6QJl!Uz~r6?7`NGk7gZdU%O{6tU6lS!q6%rxf?TBfMMpL^ldcl~g?$?Vb>gNO^hnKVD6Fm$N6+xC=it!=8{-y;J2Ld^v1 zftt36bWCkhij~|(QfkRCwk{~cR^2+#2T{3Sqd)bS@YdzU86O<(0BAa8@Fxer1BA&E z8zaSXfWLUuiDB$>#LKIP|AnyiX~B(d57+ zylHbXr0YLi>&Jmu84z&F`Ji9<#!aL)c;1wvgY8vhL8I3u(Nz+-!`kY3B8IQhD{5u4 zdCgbi+j|m$msbyFf`QD87;#16t2{`vZJzugRfvK(6mfCj&aQrl-faVQA>o3L-cvL}*Zz+gS5QTr2 zAqNdI!!rBepTlW zDxx56!7H2vBWwGJR`=k?-vCc117=4gqJ_fFt1hMNOR>VBBo(#LDsJt@uNT9kZ;V!o zFJzoZxUS2?(nP4@;rruuKVVMMu1A*jfZtELf~5d2DI{iyW+E!`h_BT}p&0`+alVsf z5d*u~i}{NQ=V- zUs#FXLMD39?KiS@_I!Uj&^?&5O#d|utccbV_ z@%hAUrvWx5nu~#p?QSku9iB-`0bA?K^#2efnhE~jHV-WVTt14SQY>Hdm_xgvcwy7u9t_A@@-h=kn05M~{ZuC#`>4OUI!2-JBa z3pHh6FyH}&xVX$ln9W_!&zuLu1K_aSiX~sH%R(rNuc&?J(riqQ>qX~Uxn;Jx9y&?w zWVZS(#=tjT$;-)EQhsyY&n)Rq8Wv0N4W?|Lhs4{yGcB=`l@B+a-6&u;teM;%;>?wG zZTT8?`Bf|ZV2YTl$JANH94i`?rN=V4|C4#7ZpoP_cZ7L_?=J}y6Bzts<0f6m{>kc& zWH)}tSuSHJRr0S#rvCzn;29xhY8)2ZYp)Zw{^<>AR={=+b<~{V$%uL61+ums3N{}0EZQkq)+d{>O(uFURSD&%y^BV ze;o90lD&`aHp`!BSUCnLyxl)rTEZ%+1P!0PXD+>4i~ z9Uk7Y_WDmsN`>g+CUsn?Shx6ck?FEx;u@aT7u&1N)JtlHw{$%R+l8uQd`|~2tSJg7 zzro2vN=5+RLC;MW`wcXf+caqg(67p2xAdsyjmfmN zaOy#rD~K_r=_` zG2MAy0V}t*--TbyhwXhE%-;}b*j5pp&$CjRpWW(TlsG&nFBs{vPCIyBdnqqvyO~QO z28Vt0Gl8z~YM9A(Q2p+?_^q!Ka@9WW)b{$$7LJ^+%J)*@=~&{BhFbEe>|2Zg`grZ{ zE!B)irz3Gf(@ynJzop8e`-Vi9!L}~wDGa@T%*H^_KqlEST;>hnV)E&DtbRwonSO^| zDdk_h*;(IC9)zc>UBVO8Hsd{|EOZxD zp`T~VqaPt2+!M;&GO=Lr=@Oe6s9kZ0l)p+Wn>;@ZAeu!RV)*Q~@0)QakBqe*9!)Hc z7Xi?D9PlKC>L1PNCqTEVkod8hXgja`xbqE+exAn(YFqe~q{GDc@&z$w%R5S};t_qY^kDo@Yvv~JHjw}?UB 
zwIO_i9n*G4x4#ZsZ;bh(JeRWo-mIh!e^%F^ho*EhV_ah!1C^L5RZFon5`mpv@rR-3 z0IWpFt;T6T9K2&w(_92iu_IaFJ7d4m9Lk9qO>2%9BMpMLldm5h5)Ttsl98P)fJfb~ z-!o@RJqb5B#e1azT)FLl!Hf7>xhO6c5wl-IIwf-~GeUjMmaG)25^FhRMce7rz!gBx-plh8zrC{QUwtk?#U1C82NxNAn(hb(I8tmQKH02uYv%fs45!_7n z)9Z9Fl?9i|D>SHl&o5RGenk+!e211GgxQBW@I8*oSDS)ZDAX`0$+ZR3n|D-x=^$00 zNg+ouW_`q9Fxu$S1j3^_B5x<3n7qcUR`a&zeEWQ6YcKP%${nD&RvzREmCEQaC=ASV z4q*PY;!;1i+*vZKp(?3@^z7WUoCcLF55$(E)aySiZpk%lnQ)t0br$N$pdG_#P5D^; z-7ikza*o0)>$F1Huzkj5jtV4Vk{jy+S3g#faVV)J>9v3uC-3>UO36s`PBAfSCfLyl zbU9oGTLqtrt!O&F%vCWzmS+g-5+mX)_1~_o?`#UfF2(s=hIn&U?NtqvCtDkDnYyMg z%=~4Jfn*iDW>xdZNyih#{By7Ss~;zAUe+9J^2QR6GsAk{Y-L!!apfRtZ}`K~EQE`v>$;3Joa zQdYUwz47rCkSBsJ34tNVo6}2k`Xu*r?q|gF+ar!yx|A!6kfmWq{YXmzdV&DY29s+v zEx|?3Pfg?dpZ$7KgK-ma@Yi*p!*7#&G7^bpFAwZ7pv&FX29SXM-B1C+6fMEre!CZC z&i&buK;x8_=mg6Z(-=_#ZHjC98>f?SQFHjL0Q0lrCdKSU=uKl!3VKC*Z~%LyvC6!d zeP9qmyu|rLxVvB!#nxhLsfzp=Er8C&T&0TE23+r@~x zX}31esE{PLYIO1AI|Q`@JE~8?E^#Pro9ij7$o`|u@(-R;Y&(xPr&mf6Cmi?9Xufe= z2^Sq(JrF*%3QxJ4dMMq&k$wXlR)I*fJxu=UlT%^@_gWj2b9#WvEK5cvWYv7x9vjI= z7qhkj5D9!{y2EdNcse+qFI)&}Om3)YzgDV!Mxj=Lu8a9p5li=OC=gS=ZX*^siHQg^Q{5rsMD>8+~`8yuMe%PVU^N`M<1tIlHcJB zN$+Jd%v!EL&LyWy)u0-kBJJ*X>gg3qJ8&HO&>-3F(5R=UO;&CXR^YAkUKNk~r z+d38<-DveNZ^P}uHFAFTs^b3<Xm?)=DZF!l`dHvqe;uG?0s4|x23)?4NWyA*w%QTsa%VSs z-{C60qO{ySk8eu8f<*<%+fLWYVR0PAsRpfYHiy2@Lj~&B=QjP6IJ=-HJe3tkUnVS2 zhdwA?P1G{AMg=AfG9&O}T4r*EVTs39eP#e7@IQ*ES}_39KaR_N%fq20fjU`d zPo*M9xbG1vnfHf?AbF07&to#3Z>+Ez>`@n7X$bT!TTvQ{Yn+nNI;=q&^1k-bLlYD9 z`m=YQp7tQFM8IXT4YKwPSoQv#)_}|SM*nwT3lZBvmxh?BT<;KS@~aQ~#aZt2etypL z<0YX!g#*uvYcb2QE>W>qSwCoKknJc%! 
zK}&AE%HYmj3aej{lyJBYl#+%6jAb`mNln+guqkbswU}fx98B#97>+W>NXAFG#*i(j z?rdok-dneH_{?h}tD%ld; zCoLf%v9#$G3Ia`0uAx{n~NGu)ftS`@k^&s)MnV_lu#uNicKi&cvP4a;uE843ikUyp5dCAs`Sd zWp2z(ROt#~4C0+lLr4euWn6oSP2S!LyzP3ybg_qc1VKrE$-R+Z2&qB)+@feN%JqOPKkJ<7Ffvwc^Y{lWsE-Jkg_^9{ySjz}1I9g2=R!6p zqje!EB#gOIvkL0@q3Ga^W82ccVgU^e8OLNxAEH^Cm$246d&#&qW58`sdT}dvFclKK z^!-N*l8T)@jv&#LkS6q|I{=l(UUh@ZUVchg!fbN-P&m!R&)$z-_QWw~!0&eKQN526 zyX+U)dP+v!6Z<#oPh*2c+?%*2+Yzu%_p+S@eXI~aKfjgO@}70!UZpHWCn*@1&^^Rj z#$KmI@77U_AtrcVCjmeb4Z{DYaJgy%%*Dv_dEr>M5b4n7O%))4VkprDHIRE!dM}B+ zz^2C9Ky`e-zFHfSr_fkyVquMOdtqm!!W66Z<~nwZBHi zD(YPw7$-Vg{e$;)Y14etL*070C!KaHm!jPxT|s(wPo!q+Np12fplj=eM31)W(x_7D z#~+KvHda;GXX9Ce3LFwH?RDk42Am|o#1H=o_SB?sr-)AQ#c?6oclLtyDy)j?c$UPk z;?xD}kiD+4El+P2*EX#NiF&=cEz8<^);DpX6yAGQjucqPgi71lf|~+%RHINqXZ(vt zZ@;Ep?mZTEMpab+UYGGD`d7KFtgIRMMyHcYu$f{;-TWkq@zT$#;x?UNT%_VmVBOQP z8uO{6AoN}d6ltMaN!cfne@~@|GL}P&hU2fB8XzcX8;{}Ap+>`tz2KuDN)5IWVNJ>q z4ylri>GwK5`XKtj)IN&|@`p;6&uhvS6voTb+Q`f-&t=#8QM6LXe-?6DsfLVpHTm)k zRu<;4^sHW7So>m1tuzT5>lz22qzDX(rC9Lln4sDAn?|r61yv}B4Qo{Sw~hWa`ODoy zwa?x{)uX5cCN8Y?6CMkqr7U9Tsr zV)ksrz2k_FqDMPF&=4*h_xdHEN@~mYzos>r>~Lql9!6Ttw{=)&?NcaiI~cXEZF=3Lzze@qk{ULr-TUGGn3}Ff$)2D;q6W5bran5UF>|w4^A*qLjbE{L z#IYoy@NX>&6nDKrkga8T9!gOG zDTq{OnY(am78vW@KWT62#V#f3R`KKB*mHYp?++ZZ7zh& zrs90-RX$5f!fRUA=OcV9eL8v4V_#ormb3ed?^PO~mrJP_TFM-A(9Pa%U93+pwwZ;) z7GHx={aRW}F0s?xToB!irv*;BevEM2f|}^q6{p^(5)VeCoO~s~(WMn)&LJC;SGBiK5;HeQ60vq$--PjspCr2nHgNED23$DZ)62Z;B{3!P$@u{Hxk8dyF?V;0h++v@I>`cN;8+PJ z)Fic8IU25Ec-gYSM+JOg=IqOr+f_&9X{xwgsThs2QG{kdPr0sNQ6eDQ*)VrSG|-Dm968BewfDMmQdxufUM*04rQpcAos-jmlGt<_c%XBOIS zZ7LD=`Q7)ulkAny_KA4@iH`mFsM>Cb(!+=Z?#Yc^oTYwj-ZOP#Ip1Q~)549%Ke93I zT;4lX+nUx0{O#}|lt|g(`{kL^2wdpi6`#tWE>I-+&|dqvotPg*D z4DD$rRwaof5?Cf3mBMDtJNv$}sv3K1BQ{wd7GPBAV1(?AwM{ReDLQw!sBxn6Ue-3$ z9zRs)me5e(PZFs1EcnOU;8pl3*03nxm1KG>!af<7R z6sJmcEu%Xz#!ri38-lDyNqf$ko^;>K2QRltV`coht2d;1W0HUMHVw1#Vess$3*ygM$=#M!`*xJqBKRYu=JbK*j-f}u& zKijBLe#>Sm;o3F9sDvg5V}HolY+6!2_-JYEus8d~Q{-ShcYAd?CcA%e($7#a+`KvrxtrNqQFtiZss 
zZ`;(gd3AJdWi0KxacVFUr6}x_)F7;|8W1i+;YcR0LU%H!io=yli3@nQ8CjOBXJsSG68E8O{FBa&g5yQYWa8t z|NI$0eg9ZRSa80@@i_41gXgaGX1TDKsI%7A!N98z^zKF6vIPeb4O)O_!ruMx`L)hm ze8;%VM7T(y&t}Lfd~u_b%iF-v_C!Lxs=?P;x2UPi^e4u)wOx6wiu z`>Nhqdxg<=cth~j|%vw2L^@{xjJthhHNQNr7jUmsxh1Ll^s!6 z=mc{nw`=Mjq2hby{+}6*Ht}?)N(kZeoV#s|r3&6U6aW95PFCZVyFWWa$))|qjf}{6 zP$!s62vS=w{u6Alz@z##TV3P5sjWTcT%0W7=CjsPOf4CLZmD+HfNIlmR0^|rFt&I( zLK(YJw(C1={|0vJ8WQ`$2f0d+GwOu z1o-?@_nwA>7UGK!ba;3P4KsiAz6OtXl>@VMK_Z}Afe$C##PQh#_ryEJloyB zy@=*+*6d1{&wU%9H8@$&4n$8p0)D$drXb0x$rq#2UFPu2|L}wun_S0a{QU{`?tXG3 zcjOx%`5qzUS5qJS2}M;^Rf6=&gQK38zzj6zRpWpke_jDkTmDM}O9P#@|NH?j9Y~t| z`$i4uTlx3Rliy@-f4_|Z8rA;&O9xQd{r8R8+sD6uUzH}NJjlL!z8{*4thlI6`v&+$ z*)JyD(+sC8H|K5b#%kD#{Et{I>)IqM2QK&rU7%NB=^hMX_Q1EN75`i}z|t8T84rdv zZKM#hQUrzbjDKlE0U({88f^Dty$^PC2W;IhT6bIwDJu%%W&Ky?Dl9y_=5ZCC#lAnjWukFr>$RAzu)u*R4{e6SPw?to7H2ICpm|kH-&F6)9ccy4>5iXf)K1 z*V6aWw+eA5iI$Ie6``yiAr~PFy>fVOvUcvh`I~1AX6A+oNyfi!px&CE&FxgWewWkE z$)`n&_7o>ihW1XrpY;+J7)(;^RmExEG;L16mi-EoH8u}Pli zz!Zl7U^1MX0xUIX@KVCA7#@aIoMuw!l37-)m_nQucy$BJ;q!H47$PF@wXqk|TQ^WS zfHo4*ckS-Z|7;?bn^sk0)yKOmx5Je~bXwH77B%!FH?2;X!SzQ*HtPJ`H@}|+T6en0 z{(|8vb!c>|`wKU}b;sL(DHxBW4)HrOpem8u&KlmNZ6bOn{@lta25e4$JR4Lft!W=w z;dMNbZ!Yc5&ueg^GlD+#G-vjJZb}SrfTgfW9{>C9-M7z4bHMTXdS2rFecFVsnJv1U z#u-F!rX3Dh2Vdp-8V$^s`FLktrI9DksPbir(Fs*>9)4cpur#iy7@gGt)l~LzI~K;! 
z%#0F!TEfzI%1IZ_fX`$ITNZSZZM(CXRDi_^8(7S9!F10%mpC2^8wxe3-XXZLWD)g@f+7Ivu*4t_Oy97ChcPGUUKD{4&RsTNtP>rTNqS>;6DJ#sNs=LN6q*u>x zs|>J#4KKiB;U%Vpmp_FVlj2f5Y~Z0f<`KhalEhSW+3N;wDp*a!fpGi%z!+eGivB(W z0hU4~j)O7qpWti652z{dk>d9$-W8hVa&lcQGZ`m3NO75{wS1ZhGBm4U2i<+~iPN4g z8O$+txnwqZ`%X{Y_Jymf^=?x5oG!001(rPaE<1)d?&CQrd#~gpVy2ODIVY|D-3Gqb zupYg*ZNsgK@v<9m<6B0S{v;r{5fO26e~pvK+et;t?O7$dFgXGoBhJW zJj2zc(wTB$;)Av5#H*Fc+)=w{{Hs?8pk_L??&;l$#bjnYk9Uu}J_%@3cTA9f8L zX8_%1F*#KTe_yovjXa8NI(p8s(m5_1?YSB0-4Y~E2{8cD1$~Ks_XAm2cY;7)Puh7$hb1$O%52e+yw?;qZZ z8d(!ps3@wbG#V$YsPuOI%0up3>^6*u#n5bLbu6Y>2@4>;cesv}E3g8NQ!e=*wU=%y zCk)rAgnN~P?|$mR-hCuCl_-w~9sDwYk6`^)U?4{h;+Ud2tW zDh`tB;adG2{NVNQ%4?6pi?T@1r}HaoB2By4MWaUI>eNB(sv~!gB6!(D7D2M`>4hq> zEzW5}xAg_bcy6x!SXvOyboR~svkmmH8yYj2HE~u}vEQpZ1W_85M$Zhj<~Q>mkaWXO zmWl$Mt?lcww&q(Fc!y{=*sH9zwzn0(f^Vq`E{wH*)t({dy{N3hOqUpI7&Cm8x^SZe z3-6;J!KK;{>$W8y%#z-ji&)va`FS@kSkHCMHGdp7CH3*Gvgd zAGu1U{~76Y`92?f%Z5X1E7_xl3&g){+#!r4%C9W&UBAa;E3ezUvq3z#9$oyTG67*D z01NM%znXSjP$(b_j0jWJdxM(>o<#}Kb`jFTOmK6Ojf0Tgu+1P=E5I(s5xfPK<-xSuif=wl0&C-b?SZP4t7CqV ztH+KWY3^hwYVUg}BbR>KohRZBm-E(c9<_yXdpigH`#nG5A?;&0O;C&50s%R+)ME*h zTpOw`j~6G$fqBv@o^l=8$~w4*Qm3PC3 zULWu47~FA8dlhtz^|dQcRrwx}h#D=bJE(h({piSXjkvgOdD&%nSSVVD3&M;#I1$_K zjEj~w`CvVlOe&EgaUCayySS5{WM%Z;VZg=nxS#cP7R~IMsgIZ*UprO0zQoZnX~#tE z$m>XM$UQ3MN{Jm0+_G7=?g)+m=ge{@qn@2Mis*sLYu-0B6iZg>x0qGFg)OXYPm;e3 zhex+9!ZmI18Blsq!9dI578~-zRdT7Z^2T}7u*F$5S%HoP*Znba)oiFU9S_)VAvGPDWv#)^MY%s0Z`+`9dkprW9I zY+Mo>!Net=P6i?-FKVc=Zxgh-u`K6Nprg+-8GUZ3UhQDa`=)k#rs?&db=Z8}uWKBO z)K;<8L*2^XAPYL6-pjzN`>5MgFYrpKpHBD91z56+?tPF|#MmcDdoNl9r-TwOR*mog z+7WLvR|xP<`TIGm{n&UVkUqWI8vjdZ)7atbos3MbKa2r=)g0zvtl5|RK_EUQ&8}&; zY@lLWva*sHacdB)rIr$Aa)^}m$IK-el&dla9mirvvmtVyI0$?zmD>6Yrnn`mOI6-g z`1m`?BTM6~=a$T((G+_&RIwD2talL+_pQ}~M1WgaRRZBWQ{}_vy3m2AlEBjh6te(2Kt#Svy?=BU*48s%ugx-{BZD$r#2YJS3N<6~~cb07`AYV=2% z?uUof->Q*IqyzJz3|}l7t|NEV>S{l3+C+1*FC1;0`?o~hx3x#J!SN$n`HuXD)UzMMthWM5Q`Nm1PRp#zonz{K 
zk8AePPwf`#sEY~Amn!$0>>J8APC6VTZ29Y!Tpl2XRShei0#PnvWZW_GMlcA_W9aeW+}9h{DRhH>n1@!=ti0*p5e$2VBKiszyM8C6;FgZy>k20> z#>-8L`A*35mwV0DTs={IIdq7NH(Ief$m!S{rgUv&Ru$O>Rc9@5G>FmLKNgfg4vyZ1 zMU+N8AV3t;Au0DtjG)}OIHKaHU93DT=;S2Ydfqkk7PtEbQ_hvpUSqDKrl$$IB2lw8 z4kbLZOqFOb>{hjgJ2@23@`+T7I5b1)l!H37z?zQ9c|?11%?@qjxiMhb!X?)`f!3OI z3k@v>j64wvPM!iy3U@df*4L9YazFH$18wL~H$TD0p6S~#3fm%g&U$v8v8~PY-1P_b z#cd7BZYkA}8QD7=DOLjVcmXd&j?>S}*Dxg5&5bJR`zaA;nBswLqtHm`TO%V=FTkdh zeVWHGdOjs)xJgd{lg0&v34(W$JW=)W+0H%Uk6={?$Is87qbC#*HG)`m>pJS$c3vc` zhyrJUQA(6`M|tZGIJDg)j^Q zCeo|N>1%D2klhsEtt5ZH{mEqzL4!!@O!v zM!2&v{;OXfEHhmqs&m0im(o@_y-+_}N=o=W)LE`M?8`dyg#8a*|C6oa+m||rYF3Zh z-Pa?wx!U8sc7Lb*9BjBxm*T(uI$gnDa@9w6yuWi=5O1ODHCJF@aC21R^ats{sk#3b zaJ6*|F4)$$RdcuJtBSR+)~cR$8Alwov2qkE9&?URtT6YW-W=s7X0{krR3TO>$vD$Z zl@^YZ9dAeE&MZz6X*5PHrpR~h5bdP`A>YT{wv(`Z%F*Xgy2FG0%VS3qmp_Rl8T1FT zS-si4konoHnK40_z(Ta^qI;U2U1R6ARG*9wD}8bYkb{v{{J^Be^77zR!d4sWKmc}-*=ZfDLfMz4EVuaCiNW8r*^oGjyPxNPKwpuUcflJw%O*v^S(nw{Z`(R0A_0k^2CzT~L@UHwfH z{fl%ccfbjDJrEWQ>2 zh{oNXAFB(EegiuKY2oNAcu%M@T7W(}N!Sr9s@g2J<)bigI0thjFW$NIB<8*uzQtX; zP*+&SEX}S~>p*{TN(f|-nH0Irl)5DoM893eBF% zG0R%q1v0^pp~57FWVnoP;LqnL0Dmy-k91l}l^;Q=nUk#;&qi8rq?5~y9W-k<0$Ie{ zbV9lEO#YzZuMYELf7T(W{SK&5# z3=vx?)EJ<$Gx$mZsEr?g(fjmWtX2b z9o#i}!oGip;Znc+{4sb&b#H7x6`xcX8I!}@_7)0dz6t)4mH_0cznGJ3r~K9R`&BBO zD~t!Hqo-HGtB8Y%xj)29Ai2&|;ZMmCp_jx?>MDwnZUi!>`VT-l}CF1R;J_|8)f@8@WwV5#40#KUQCD8T?+Hn3g8?ns5_{&G_M zY=x!s5iRbDFREP?H=C;Q`sgPCIa-uSNuXm6@}vFl_tW-VHtVjX%=6+W+yro`3Z7qW z;r5wFB*ix^M%8O_;u5*ksJ=e6iBeN_>m#;vv2GDs$nKE%a#LWlcy(VDbRw=)?90Bd5N^!vGvB=#>e-+#sM4X;A zNuIr|!^{-_&L~?|yLP_CmkqG2FgxZGmJG*Fry(Ss3=d%D0U9@@M`;V5&M#f|UMk>Z zp#&w=6SSYnlY1&n-ezv+b9B?=N*_mH@aC&#vV6k!0PZkuP({+9q$CpX6n5)pQt_;qp+qi~boLDb zw!nh}*W;Nkk8{VxaaYcH>)>EMQDJ#s;xatVY`B8uEHjC@dFVC?$=kGi_3RxgsY}ZV zo}GPM`7&vK?){M}lO(3_xi@wQCG{!tQM1R}#{pbbmV?U&!Q4}^D#G*n;Qxjt`MwV%9BGx`}a{U zLA3;*ui=jF)mv+~JazcnDUE<967gCQH4FaP$#w%IzjGhJ790>UPi4^=dzUG&Ct}W} zjF;6ziq-D*cQnL)j?S z0HE9D&p>$6BX=>Xm9kr2j<-1g&l2xHo%1q$_V9?q=%su3(a+qAkL$qzH0?iM8Vy&Q 
z7lBijOb)1s4Z}%NK3C=3Grx1+C`oK=B(d!0oO-ef_b%JnNG%>xD1uh(BAP z?kn!TrOD;`I3VFTUHP#GC(Bx5AK`(og-GhCuYRQYDmEjuUinz9Jh{U_hj=fU`~M-*J0JZ|i#9+FqHjSaD0*?sOJ83Mb_B zuMD$+{v2LQhL^T(bi-#o`ku=lR!j{H){4g<9u@0r-DgzL-5Y4+zVZ=)5Mk6^?b-BQQO}uBRX!$T5V82 zx$T4T9^Vv;^4rhrY*<{a-XI}F(78)gemLX+D3&eZR^w^;jH51|lX@5T^#M=m#wyqE zn4G^)Z}#juyaxRFdf&wP6!q-7sM+5G(?pAW1)^prGyQ%8&CoNRlJ$C+RAyQi4@?+NXEjA}sgTXp0Q@$v#Mk!Xn@p`ScuBhq$vCWFJZZGmFv!J|h#PXnuwi}gPA{sV!FGqH zQ-u*2?)V2v@)K@#G+{tRKKj2AaR-plB_5ae4hXp;NoU9?E%uLQvtn&`_t_Lj{{OYI zP}9;Xh`DdQoUFE0o~*K0elO(SM)=~^bb2+TtVKD2_0tz4E%bhZKmENJmsnL2qu3Hn-o3i2 z(#ZGr+f`!27{9f6zenA$(fGGLDUWoB)UE^Vn&al{4JYT8#f=6jr>9hJ(`xZ}?x32` zw)x=AN{454=gy-s6L;iSx8AQ9rYN`J8F27@KGfucp%XV8m4%8vXVf1*x(-Y}?tZ`s zko1&xPvmp=gVgNAm+uhcMGANyO>OYGZ|#_K;|;;VdMqm}Z!p~GEd_1p-tK{3u}B`j z6ofdfey`Bwa#|>w=BS`5bz!fx=NBT^0LvG9sFYjXs9Qr;_p$0HFrkWDoGxOx36TJO zA~QRL()-T^S^;{D>fa{3S%*Z@;w_<vhxpCDv6@fKdziNw52&g#x+wVR%8y5Dc@Z1IY~0@i&@ z;F3y2`9e$uFGt&l5_8C@gvXQAN=kmq)Jn4{XbZ5&z_#nWEB*WW&d-Aj$IDk9<3U=x z63bt!7W-c#H2U_QbV3T3PA~%DG7Q%NPHaLK$xGa`h|-GWf4<1ql_bE5-v?RlNsI@q zVF9U^S)iB4;XA~mRR9$XCpPgvH$E}DF%JAk(c9(1@f&y`1|K7=X0o@Rk!+pE@RDVJ z{cOCS<@Rh>&V!4=UCT{#d)SZ{fZ2go*ay^R+qY4T;`79HuN*zi?}VahzwM0eGrXd5 z8pD7iLDTjwLTT%@VM>|+5Q(3ww`MXH!-Pr5Oc zu!YV5J<8VQ|8@YKKYF2LCQ&rEGukAG59{>@1Gn5KNZ5zlx)z4O4h1~ffl09k_|n;K z`A}-``5w<4-K>Mo>5t%*w;EHVyHJk(vu5dTbpBh>QNGwktE<>ew2=5elc)GId0U71w*Nc&s4A*{3gRn_ii7h>0XG_;nGNcEwsFiY+Bf6Qd55mTDkvfXVl3 zeb4pV6iZLu`7Li?p!5rtTO%ZhGjvn8T~^X;EqgEhu1{Ad`#FUkadcHGgdZ>i5(q7) zooAc!8pA!YD+XEAH7h84tv$_KRPH;U--_P)u-WC7(a7;v97ZuXO-3_bP zYs-sGSC0}N`ke&ju_s3G=K)}x>HcmW(A?IF3mWc$8X z$8GO_e3h6A9&!Vw^81dW{v3DQ9k~mejt?B5n~ushF4_tZFT-cfkuFO~=yJ9#R0CNj zhi7lS6{J^Zy)xBd$$Q4jFs`}eFCh@;w!)4XIxlWI1>M5ywW#^1%v&7#{H$}-4;|*YDNPp7U~H|B z1O+31fSW+RRdWAALg8s$LSCje6r6D{a4Q*EnUoSb2v((gPC7zE3y9*4+0-g z-(jrV;-xqR@LuiO-=F|~P{f*)eX8>Yf&~r$DgDpP7J!xaG8MOzqWH-+n-xbtgZKc3|EL&y9=y5U?A)|e<(-eZM` z0G;mZ%G%(OsAJ%xV&_crd#^L~dB2#pzyG5JU(uA}0`&)_J4mv%u>lD}22$E@cVHPPh$unDZ(xU|JjKh_O9~KlrgYrV 
z!fADj@^xaY9zhk>D{nsx97^YDOw|Z@66&+>7mk6%2CbSJtmjRAMzXruvQ)Vy(}(6| z$Da(S%F@#hAg30vlcZh7o=+Pje``s|)D|}BJQxvgGlKbmz^7I1(#H9=P0!U9rR)zw zW9aK74HY6A41yIb!>zbwHuRS5~^oDHzHUPU5R@|oP+)HloG!u*1 zZ3mQ`r^tA=TBT@Ay`87Oj+k^XjLK4xDSRetl@o=h!$23Ti@~jrlNr>>>w91dEXJ=} zn{UI-X?v#acsdpwrX!L2i-)(0y3v!eT49*ncdg zF)^+60*CGJyTkn4W` z0Nzz!wyweDpZ!^oH~&p%iek&nkFYmokmD$(nfse(ZF|K$IL9BM`+&ncbC(O<$1rXu z5u?lew@PGWN*ik_x#WhC&Joz{afr;c_^$TA7wZ*!lE*kNHavjlVe$O($SAWiKEbTp z!TW=-R8d+6nV}^DB@V-bEnCF)V8LYh~BH1Cj=lIvzTa2KRj51S{+O78>}usw0AK~ zM(VO#St`0~(KAKd!9R6bg=}X@ai#W3vby~DOGrH4X3y>(kZU9FmVxCERkG1^cXW+i z$&q1QJ%3r3_mNUrU`bQ2YxZpY=o>mM>uk##K*r}kZL`jZuboB@ z+k8dv)bzb6BOj2z!&2qYPcY20oG+N2FrM>BXZix?Ml$~-Tst~y&N)Ot)%A=mxZ;o8J9tn+9RdS zF~j(ghHw;Nm~+Whs@nUsWW4Y}GFaCL5OP-vH^*zwHzNyRGp`=mM(7R+A~vB z$%zxS4smX4qi4QZir+deu+~7wJX_a&Wl023I>qC@r7CR#muW$UJn`h)SdJQ4k*{7; z2J=S?tMg&hAT(#bbU<{{ORK{r@AlZ(|;=)WL>Udh1%MDu;gCM4M zvW(x9Zv#E>VA9ok1gIWS13S%}WMLNqJ4*X-dVDGKye!9frF{CY5W~_LMcOP|gc^7p zqU#960lrq*De>|Eps8n~w02glorT}ao-*Zvqr8P&Q>%Ehgk%&|;=vvsr;WrY z14t_a#SOJ~kISyZXYK6zX9qS?f_#z0O$Kxa{mawGk=t0<@B3uT4BFp zVhes)`BE^BFOTN{EQ1fm`11XZo#gVsr9H#t zE@R9jtl5d+3{_ijb90DS=T0X=kz_{)=u=Ep*&%N__Tl-4siBF}FTRP{Pz=`!S6pbz zBy2w71vht2#j5{~l2n=Ko`|BBbOeu21%S;Ao=hthc=iGm1+a#P*QY25bI7LLra{ z;3;XPob7S!wAhF3QT(*nsA3OI#&E;W=)S53jpU=fz%1OeKQ&q(t44Q8sFVkov*ik1 zHo9qh0ieP5>j|XYRPVJTK1408tXw$Zy51l8QrbN~u5Q!f=t4R(Vsso_6p{Eb|ipn?#P>QFp2{^iBa%==Dzao&r%9$=tQc7 z)8cKnb{_cS=9nKI1@9=6%@~nV)Yq2@&`6R_)nhGC93sOF6|e97@aYylvh^j4k>*(& zQ?SSvbe#KMHcx`5g8>ucWa*-Knr9@aVLDecdED;Vtl0@wi-R_9w=fuXJSG=UI=|w?f6ybZF#;6Rwg6-i0>+*(-W8R(S zHG2|@jfS}bi=k5EA)Aq{x!EKF0CD*Q#0*S5nNiQ_Z6L+`plk@8yTK}xsk$V4^Sz)J925WQR4my2R)hGxO_@wG z&^5by6cKKb7P*qqq_Sds{}QPplb1BmAd0fE0MpmtAji9g{MohW#(l775~fNZKLAMb zg}RM#cP9ndl~@?A3^XXHQH$G_>9r0k!5KA{cE{H~(Bu&*E>kOTMCloP3pHexx*k|B_9pB{c2p@8!an~wIKTvcuHk2HoUNfcM@;4RWWX(KU_1M!u2GB zEl!WU{gs5l50woFh*m6i0|KmIlMSrI$m~Z2_=kuFm4TB^oe$iWs(TII*eH4WfMs`6 zK@_3~x@}m)QG43Xs%qPD_WebWbIGtM~7n9bifI+_Y@9>7*_2S`5XD2Y#zit 
z7cvB(O0|~?k7A(kBw+7oE#k{5txB4zs*g2p#g0~Z0?DHh9^&zbPkGD_PCd2uwxhJH z>&|i?jw8(c31-6>1X9g3*OY?5`2&t;+>@d8L)}$5 zc|lp%-v%vg&^NY<9Ws~T_kI*7Q#*M15x;u=K>-7dr6z4qF#LvA8N$TikHQ2rc^Ptc z9b&X9l{eHY-!pNgTb!W#)mQtxhAmEQKTQ#fP}m;G%xc2GkYT(I;(_*8%gIUUvig9Q z-M*n7F+N!WmvEZ$mJ;@X<`Q242XnpfK%xf(rC8Bt6EwaI;XCtWBM?8E(oI2~qndf; zD70lHiuFn_q}_1<+Yqp{=E@i{pgt81Hh9DHxY)?VNf-5;b z15UfZWT)8O4ST!scq1XpNmnho3d)Uar^r=q6}t{^@DSH4ameJx=mtXiD&ODK;#b3v z8FWHk#?CF9O`5Vjku|ZObQYNtUc6F$Rfj|bHFLCs>eX_tE?e>R7PVrUj@jK~Zu1C- zhB**g=%3efRcdV748mh9%`%G3iu{8(@;=#)bt6d#j=YZLHCj6FBoz9?M(6i(9V`-z z3>95yMRnAFasziwdefS>3re*7&<$%X*H+p??toAR70l3t{h>OAd^9BZRBCh*W#B)3Cm)S2)ZhaS<}mYykgTFR+{Og600f z$ysqWQ#2;6y!)|J$JlPee30>IN%qBE`jNwI?DOTzJ3n<4X!*9^H};H>Zb*jkZz&LN z`rZ9$)l!wQ8-*o3u9U(v(QsB%KxctXCv4$7e_-&mdCY-eLd*VJeF{fz;+?0-(^k8< z8y3mcg_dr~{xc#TTG;gaxx|}D(U@~Rl_ID+ZDt8W+;w3j;09whtB({u^k zzwYb6(#58sKp(?t?*`Oazkm|(#PCQ3F?eRUnm}-7twd9hbqzp9z3JC!Egm5({a}?tp{Ak zcG74Go)A#P8=LeCsfYK}I}5(g?(1FAFp`Hzf^`rxc^P}TmYtw6l*C^42P=Jyw_x`d4FmW`uGqbSk%a|xKF>ToFWSIQy?J}8zUsd5m=AQoOHtKz0zIE2XaQ+z-jS--B#_PJ zY<%1abelP)0;@m>vfrYa6J}t7htdidnn+_-iwx$Q?kXT`Do)?48Z@D95%{>{*w{~G zxRyscJm5sVP)TUzVro%E6Kz+#>wiS0cX3q~F*M|5|kz1=6B2EA^~qrAlh3z!^V=m#~zAwzD#Bav4df#*(d zq1&#w{pz|xD%0470CcZ|aXk=ok35-)*^*zm)!!2~krItXq3Mz<<@Gq^e8iJ_Jf z!P`b3ZKF>(;IMcl>gA=DT`&UF<*>D0#p^f*QfXM$VtFjW9ty&{?O!0ydeq?x%d z>TX%|Bd+*;*bh7q_?Qg6)4js;LJ)>+S?G+h$pN@xi*$|er^vbX~dhZ1WN1D ze&?yw@t!uo77WOXFx2)p%2o#OA07V%O?9Vf%>m!dQN@URA?pp{1SV=;Y+o z1KcLV@S+!x{$`{sOR(;-)A5jB%XP_iroH4Vy6ljbjon!TjEZ^O zwn(lLQCcRMmH4%`pPFfu+Kkgfm8WEVjcBOaq}9S3uv9>e?3Qs~oksg;5D`5t#$Kxj zVh@w>PD-Z-0@ue^KA43j3otIG1^y#T{%AF@J zF_NXKg>TZlv9;Mwo-Hu<{OpK@<}!g?WHBd78k@z|=`OuTLhS7;*?mhb%Ge7D0_K%F zHM{M;6cnu5M)>YaH?u#gvuYg1Zn!a5y0&y%P}oL7$Gz1vha)GG(RrGsHtRD!DaGF| zz`>I>$OqXYRQE+R&$8Y_`(<_YlUvzT&D6~zbr9yqhzQqhWeTHrcAOKsDp&cGM3ud^DoL9O-66kCWI(>%CdaU#g`J$Ke0l`KhBg%YT@-M0=JM$b^{&uR{%YVu zqJC`k>^b-^v%ISM^rTb!)0CrbwZpMc!xYp;P5HcuM{HT#8-LT8CBrxgA$z&_|5xAw&clG2gKwVLjg=W8=eZJ)ctE 
z3~4y&h4uCIAjfdIwK%%F-}`H}moAaw&jNfA01(NxL!w3WVE7`#&isL)KE!A6ig~ou zLIU8c)<#Q!oA-VCEBp4n(tB9(V;*|yF@RRDdm z?w*G0DI=2TFJQ#bsF*u9`g9$bz>*hN)FYgny*qgn1L1+JAde6~&h_glipx zt=v5 z0?@I7B&R6|2V!#}SdF`6BE9RFfanxy6v_)7gS;XiY<%uM0-0Z7%2G*0T-G^mGRz0G za+3Wh!Cj&e8vBgiP60u*SuiG#u?bJ)lAL#UcfSA`7#cz$ezWlc)w-KHB=9?DIKiEI zun44)pEEQn6|IIv&#uscF5Z`I$|Xl4<`Lx>F$I5Te{S}TA z!Yw4*sm07c#LTDf0O5EONJV&Oy|EG8nXknU{3zqDTsfn;zTQXYxxV=a-uq1fVtLO{ zy~1IhHG+IS)o@mnS6kliuWq0{jhX)|ls||KI z0*24Kcv@IOjT+U-ZK^zzr+G_VFHv&1zwI9K_*CKG$S$B$D}2-WqR=<(&XYIC*RSdL zU%|vI>>L6Byp(Kpt>@340r7<7e#0Bt_VvbvxX?w7a3f!wKoGS1{E1;Z#hF#<&bh}) zskXI{Q{#$uwAS%oL}|)r&2Gkzr&~74tqs$-t>2^>#-m;wFVg0`T=BKweOe?Q$L_-T z6;}=nJZ$e4T12)whas4`E;lF;e7C^$Kst?)U!dDj&qW_S9jIW>L1*M>0r2SihNHM0 zui~lePJxc@ko|_X6vFm{x$~s8QxOA48Q~(TbUhy*geaChfSL%> zTgV&>tvT%E?mK!6GES56Dev9|U4^h?U2{otnrX+M4wzn^uNC3T@$c>L21Xq8grB@6 z)LH%Pu1l+ZVY>i0saeQgu(Uh3K+E$i&>-+pol23Y57WSRMQp_RUT(V@6{iFSHjU2V z8Sc5kXzBA{Q|g?cZ1vK6z?BLjvq$H^u@$?AeGKANwl7TX{WA4{4!FMsO0kI3x$haZ>+;MwO?S<-eKWCy`xX|F zFwx5FHCw}z@MQw*_>;5VrB>~uwA~QxbGxWOqjyyY5{UbZ8Z3f)`(Z?gwJ+|v8l(;0 zuve7e5gvP!FmLSR%uem0$-Yx8Y_Z|+h{;t~<9jo_Z{28tX$>TrahDY$U&JLRV;%r^ zy|uOgyaeidxV~sez-q(P4|+c7)HkK5e*63se(JMZM8>9B&e@^1XC(0VpzpF?XjL2I$=`5wv$9nD}!GW^`9?uTO|i?Qp4bsl?RCt~k)M0^Ao zmQbGlv)cU0_m1bNLha{!VVF3aW-hv4tz&P3mfS=BiSbvU(HWw^z<&4r7U&9Ft|xuZ z-a)&BtLskdJbY|dSgAY@k6Oo}o;D|I1ic&|-3Qud7d3+`9goEf?%`dh0ERz&X=&+2 z;Tv0Xoh+bPa==GoNFI53Z=7#x+r6%{1U8h6w83)-y#S?Td=@h)DC3Jjq3}q4XK6x~ zFEu>CEHn5cX4v}AI#Z$6IRvXjrpV_Y{@?}Z-4D>a*0#7O%-zJXBrJU-zd?@aP5pzD ze@$4O6zqa3@HinHKNJR6R4^+S3ofkw(GkB#u29h_F~^cc;im z_nhuWj2~$t4t`7yG4CA&@x`5;)vy_Myn?QIcobaZ-iYP}TmMCJvgpD#0a?A!;gD8Np*ECnz=B7posy;Njm&rRzr#k(^GJsaU`?{vx4VAsda?grAN#ixODorp)GDSd z09t<3enaGxZ!^fZ2X%hJtU!-JBBuU&@mArRcImZcuo+KnP8BI75Ze=?`d6^UpCBe; z*fe=kMBfrh$5Msd_|;@d4X?>yhn2sw8Ytw$mFfGg+%aOC5?;KurdG^?4!JR671CJ% zWb+e=@rG5yyVzSSfdddi2}+;#BI$Fa#H-*hoDM(I-ui^!e{B*>+^PmTThwEPuKRCF zbmtlX_8sVa!tY<2y1lHTz&sFp%5tM$2skZs*O=Yt>j( 
ztjaH>3ZB+!f7$Ciy;qEkd@@>keE(OKT_j08T3#~{$bBiMP`aD0T+Fe!!RX2LE(UPy zT~10_3*<00CPk|`w;Q>dEI|3R+9VW4wA(3jzLef55n^vP`BBS3R{|N1I2vr}A$8j* z&UmXWq6JcR>nbeLnh-_wub-3V68Ho*xgUiXCn9pMO4c=#HixZ+5V>WZ@Ugdjv3PV6RP9BiI>v zj&Xahb52I8DJn3J)nvQrO8u%m!C91`(P+bY=ln2bNzW@m}6+T*;S|7`c53`Bldrh3AxSc^u84m&M`ur#ko1ZPU9=I@? zUgrmTDq7Jkur(}!N6NT4aNj9YTz&TC`D1UocR{xmarC3IJaJEI<#_!Nu=j|Y#3oxe z?}2WzoI)BPB>J>P@=yV(?qr=ZKAW{&;uM(P7?-?>IIiA7@5)c|(3Qkv3j8oNm5YF1 z@GbEM(0N-)YS#b*!aG~s`|e8jHL%FdeJCw53Mb<382^fa1|A#3^%F=@Cv-Qe~-`oo8Nc*xxW3m1jkte+fvVS5CIQu zBQcQ*AP*PmWV=~Xfs}CHnM}9Q6QKE=Y{w`Lf7onh9#}a1WbyEljoh5t;Qv^2_Otf$ zJ}B}9`BUFsr_}Nbkq!zWodo`zC1}A3IBjXCP@WzdW_*Hf2$=iGFXL!Qv-S@;1VCi( zzn{dxz(z~e#=t54!=~01w2y(RQHUaX*N8SP~VWQGBn7E_-ZY9yw%xrnMgYn=Dtn{ z=KvgF5298e-Y;mfp@??=D0Tq-HQzUz5_VU5Vm+`5PDd6%zYTgLY96!oOLBNq8r#f% z8HR8FonR2;|Eo3Hz9YzDhuzu*ExjFhr}1IgqC_BAMhAJKeA$L86x)=Ds-E*?!Zfpl zGiP^z^W07O3bv=|HQ0mo?fH zVQQC}%G*1(p0o?LG?#9rf}TP76G(yk7-wZ8np029pl&lH#JLbe+OXWm^LK)RNi%aR z@5^d;M;(&4fQJNrMsC4ll)$_|X^{hr%2JLq#kU5}Pyh?@0aUN>&=^c?P9TxNV9if} zo#S|jmRSZe^;}3B7a^cP0LI5BTG$2w3ZH9S0K*le*k;!q4#?17!0}-)8+_;-10aBm zv@}}Q5=h3)a?&m0&-sjrlXcjk*_ev4=s0g%1^BaU$LVw|sz(+js zCEvQH0FNS>TUzH!Nl6TxMxed5ygw>d;8=bA%=6`i6g_TQ?@?2ffH>Ri3x|9n1A?ee zY?|AyS0RKQDTmq1AV7vtTVYzsXt^5H^Kzzj*0!S+b4lMpVImD(?zmsz3be5!u=h zC2$kljBe()^hqAs7EV9QEOd?V$GXh~aGFlzoB2v{y*Fog$Xiq%RBt<>;e_`y>2omxE0J9v8j z9~2}Z0uqSpqwhJ8ETw=0yJXoKPsRU~|Lx(Sat!BQLXXPmHLF$M^&7LXZX?qbW`%@S zIR^@<(ebI%Ai|OwkfwKXCKhE-ICoolTaPe6fVlCAv|^1+hyjJe z^{fL0F%ZO+SOv0X+2yQo$AJd!feSBIdQN!RtVQ}_Ww;*$t_aLtWNQv1f1J@KZ37P= zwfc>66yUrA(RN{fp4+DL?p&WJy`;70Z*vi}>>`?%TU*PlY1ti`fk$^qLDrA4V-)!Q z3XsmM2K6aZk35WMWB1>^z)YtrB}lBd5ZJZ6KxT#jHZ8II9_^ZTtVwpTrTZ`IyJ!R7 znZD1r2h<+k`JI6OFs8R9&Vde?ryU)lcM22U2-Gy`o7u52<}~G_fB1yA5vgK`fvs=i zGmT>FK=3PQW#Ul;zD$zEV;z0d#45(czOOqxOxddTfKy$;pq>1vxv?3C8IZR~azdM) z*KYm!D`j-(@iU=IfYOmIqfV))1uoK5jCB{lFS~DXB5x`Ae55YB-Adpo5oGj{Y=!Yk zy1=#{eJ|X8)wq}R9dal6#{az1XpSw_+jNn0F3L^5|W0px{UeaqT>hRq9N z=zoyJiAD|wJl@NaIs~9|i&p}!g^oeIFG~PsrCR($)vHH94)3M9oylpVW$U%60xS{G 
z-zL{##UQx#J{P*er;O*y<=M#khH>h?vg00p0$PYS$~N=);B!^ex$#g~;SOL}kiC=$ z7$`i9%dzGG22i5?X8o`WpPt~3iOZ^-OT3#~%qxqS%4x2FOmtwV5u(Kh+XV1gf4+iQ zNvl%^LI?47E`*nwnxl;Vs%=g;_}40_XszejX285)Yy^CApzR5^48Xs2JWeyA9R89FMp#iUyDU5J1 zSI@TJQfCunZ(+ih=t{-yne9MK2 zN7vD$=m%?-^2(-O=Hj%oC0i*$*svnJkef;t?07pMe`@L&5-CQFmMwWhwk@--C-%%d zbj5#R@zqQnXz}Wu)$;Gzm>AIMLh8eK0Z}2!=jLCReK_yp&KneuT?j{ie}}zomcD>y=uOS4VCWmZ|`c3!|KQy@y}WGlRYE z%h+lgAw6KO)xNi3*i@18+I>+iod06J1sR5pXJ#feI5sp}s_&2Qg<>MF;HEX+NeeOy z!bk0|Z3u*^4gV2t(*l+)9Sw@FGv546uY`W`=K53v7LQnHHQz&rS%7N0e52*oR8#sD zZoEqLz@}HzxN@2FO37KpBs#3$1{Ps+vW@z7$Lo_flNs$93GP+OVhHS*TEq)KbYKfr z6kU$tDw%(>!h!(2xD?8hbJ3X!IUKO!tm z9179^)y9L#FYYs;g#gQm1yh`vIsg94l$qAv<$8CQhZjt+p8;CNmXyIY^=FI7lj35` zoFwnv%L}oxPGk%4Q|oNXp{T_}r7I5s8E0?bLn~?J7}p=`YbRe)OUB0OgbN+9tU*lv z7l5d@Y!k9%_!M9O29r1%(&Ql9s`)Jf*!fw>N3=$UAa4`2INSicnUm|&JN3)mMDXTx22^$H`d>=KkjiAx{0xnanaOINe z^>v7n>E%>|>wct4SstNmlg0OG{hq(@Lm0i+^(F^wbiAv7zzoP1UaN}@>g|crznq~U zU>l{%;?{&foILA8oCXW@IR~UUjl;z2Ap7_&eiyHtDyV9W|SjWVlV>?KH*$Rc=K?cvqm7Rg_EvK2Fo ze}f1>D=T(ov=I-BE68~(E{+&jol4T%Nf7GLN=G=Kn4;P&`a3WwT!Tg<+LVcHi(0Q7=75mYR>+)lUEs3Gzl3`Y;yWExx59>kH~K&1A=} zJUIons-O6XP>O8pmA7F3OZNk7W?puy8jWu{1~FPAFhiyFJ*=+gHwbZda+1s5K1IE~ zbdVcYVlfF_w6F?IuWQK_V^G)#AwwIwbeda&0WwjvD}Q1o;H4$2VvaM!&tziNwA0-| zolpVTxy5K57lT}hQus^nBl$!0-81T z7J}DQooD^LCV8G>94g_SFCNV0@_jf1v9ix|UE9$h_R?|oDnSHdl;5~F_4w@D=<|y} zIN10)V5*a%?+`E$5iLr8Qwlh7)RAwsi;-~od>~L2WR#s@Su^s&tukJYiFfkSR0i#I z2N$?cVs;Ya$pz4P;O(2fwqz@b4pB`m?SDbtb=*Ck*miS)aUuZ10%b661Nhi#b)}KR zF!3l>;Cy|zU9}R}X;Gq(?GFPRV)cehx)t5EE(iI$f<9^BHdqR+&DS15-LH*Vo~jBx zxBXfLfPX+fHtIi_J&5mU@Lqc(?j?_jc_Bt}Ms;+lQhcCWmzb-v8tisI4X||s;!eRM zwIeb1mYWB)M_rA><;JwK-)WL^rJn~G(;>;CFS^Yx(YbmD9eNVG|DA1jb2!i|r{mgjxX zF_lf=M9vf!f3{)1QUiO4rbq6OBG{on^VEgD`_m>jF+0!f~&(B)7e>V>gW(vOFc zO~{aSd(fmRe1~vi?V_oY0JX75Jz7%pVZL)ihDDaQ#MBRRn%Tss2NFKb<*Vyk?HbO3 zoByCASg+(|aHu@R;E-60zlFL52NUkl|n6$G1ODD zn`{ki+w#d?4Q+hTP8*-L^|mI+{$Of5x>+3Mi)~;0V%;<&Y#!8=Edr+Vd68J~e#y92 z^#?ycQ;1#wIsg|r04`8LOdW^|J|#jF?fNn&3AYIWsa|;6XtSz77^9M!#`&0xe6|vk 
zb`($o@&G(Pk6KbSVA|0*{DZne4FIvus3aF?7D|0-w`FDH>E0iTO?3zoM2&@*x@i_G z&4dUrL6L2*g20U3$^IDmkntl}VD9F-9r_argj#A__5HoK7;w7Xj=lj-#&t^j)sMjJ zs9%cU-~GT(Fs>I*+e63c@!rv$F%!pL3Z6@^)SYQIH|C$DHV&Q2{AVs0kJ6}Ah@n7_ zc>|iwC}mD?7$|e3u-j+)uQ1>iKqyHQH_f63ChZW2H=@qS3BOA*iT$ktiv2y7`tG08quj;Zl2Ljg=+G1e2&Tt)gaPhT zG39IBQTGNX2c1x(lbp=9fUKh=^|zU}>qQc^u7Z0j#K}3e6kK3%j9p)tp~Mh)&Ao+u zC$(n=wXPiw35x8s0(6<#AjHGwc)d$!42n9+RJwPvh}rp2Q6um)hj2GQzcbv?(t)1{ zz^skIEVC3c#k+{on{t77wUaZ2+?*V0fiO8Y ze43TAH%t`9Uu%)rqlv+pRw4KcHK-qQuJd)T&rCmP>oq}$)yyV)`Et*S98D9uLp|n! znKg*33#+ymz|l7JJ?l7f4kMqa-3or9rb{##5IQxA^(f~LT9IN)fYhQ^m@yu5k*@*T zs*0|XwNW)5jYA6=etx|P$l|>OWD?rqza|lYP7P5^YCWHgen#rKQAAGdaB{(yJ{?fU$|;#2+)=7LX%FM#JFnJ za;D3Z*A?fmmolhyy}c=auBCRvx|ZkdBy+{%Y(SLD#aKenLV+TE+Ei7b%SZIC9uUK3 zt)4WwDf#4U**|wI+uol#HMc2aX-G_AUB7eWG8Z9ZX5L;TB&NH#Ywo%&p-RQhI^$zE^>8-@rd!~tf zyc=7C4~AJL+!WQ{6zzfT7V&^{M3}e4LW0-MOy-GU$II-+?;9aFL}&Y^H=gH`$JLyU zx5#C39aRU2wK*sKs42}X6T5;5qSv9_7Jj*SKSxH$lujw8lqd5RxD;>dBK3Y|F4Dx8 zy=_2i)=*lqIPy+$*X3G%|E$oMP4<-8LrYdSe_UiUGi<+8bgk>SoOCu6a|d`Zb0IZ( zeTDkOvA-X9SAlhB0Nl5IKM0o2<|;!jOq(?G05Qu}y?dWox;H!5!cC3)N6ksCHaM95 ze;>@SWs3qu)U+*aterXGG%1{1wtel3e5X|DxE0{tQ8@>ILPJHImXS#;0+|(2h`-`eP9{7dwq|5wHT_{DFAUw_s!dsoEkjXo#931Tk=U$ciMYK1{jF)2~&# zOP^6%%KoI;!gXiE5Us`p&`AoWFeB#P?P`R=^k{(3QKt4Z8kGio*^G*S7}($C-1q#k ztp@;-?4B@#$6w`7PYeZLvf%yZbpJ(x^dB5ygtd6DR}{Lb_x4Gd>Am-tspyE;D-UvC z!?kH1?)@yS8O*Cwn0UjP`Fzcka#XDC*ckij zx2gLQ<@N(-l8s2^y3&VQW??T1N;4tRw)pdw5d7r-c$T1-@J@vacs*%F{ezk;1{UNB zR;8bPiN_b1(Vqpxuf9iWz(kZT{+yOA6tgIizO z@dmGel3{yz1n0rL^HQo0Gf6#V95>BD9ITz$=6cx;Tp1q_O4hs80uFy6ZV(0BTKsme z0c%AxdP>H1wN@GaZ0s83uFWAkV`Zdcw7UlIHejVL-JB|{;x|$Oh`sBkTcc(zw^q+xD21>a}l{s66$ z9*kJ?`UawuNPwFuJk;SMZT#v$^1%b)XRMMPQPjXv$<&L^xj*o18x5i7+} z=3LEh%1@hH3l2n5AdYt^&NySj?Tk`TI|#z) zBC2n=a1dP2JzraOLrk9ZnaQ}F`!0P?i+gdtJ>F7~(Q7ug8BpYkD#3{DAQFLi&o0V> zxVeJibW^|tt|CPv%mM3#7j;4}>^yyjUiJ@1*S{aKunDkB*LtoGQ0DjHBWxcMNdHHa zVOT;lSVY0?|88|QCmPOZk=5Pn*T0k4XA+}`v=%!`F4&5ol@JK1wO)hs=32OIJ_Jb_ 
zg#Pm)VE`D_J@hxwv1JopU;niJ0=feI31a*=0IvZj@V^Zo25d&qv3`+6Av_cCdZp_P zik9Gn6x;prT~cyx3U6rpFrYi6S4V35f#*?zR~{yL)Nbn`(8KJb^0RS6H6AQ5myG0#sTXkctxkc zct(iuF@}y4KylsRfGPo_#_mT+gMl+DA2^(ozoU$V>Yn*(f!n|U>tz-QIHdp*W}hb* zl)^(+;kvwPEJA3g+Kc`R^%XQkAV%XRTH`*|1xcBtFII`!M3rp7Yxf8Konwcq1e;)+ zF9XML1PfZ@0PU}j($b^YH%|~>;1IZm#Iz-NZP+l!A6U1As@o-DpFUvX5HYyOF0X;G zq5B??(OZcA{p14R34qd~CA{65L>2kEiYZ(^FHD2%$2aKVCskz2`hHHDm!GP?PB5qbN%*wUapzbL zfE|*yrd=#1s;Hpd0nHbbE_l{#ivjR2i+{tzn;D=}{gXKWicbena9P7Y{P`}O_d8|6 zI`t~Y=1FHk0##7H?Fx3|!l58==KU%NW`qS$mOexMIrjP^`PPO54^#;nkKgZ9Sk3Gh zxBQLI0nyYz|1wI4t80Jru*74D0f08W?Ig7+>}bxE#Kb=yeXusbo@3J}S|0~QVeru! z_V)N%A%e!MBxGc59q}g6NM0ni0KkaG2CdyrTp8dmO#z6>t9OEF)b3gg7jVPJw>|## zZ7BHGMjapxOW@L*cc_=zBJ>0-cP|`lYMVO?_d0NYR|1oS-sx4l zdS~4PL6!)x6buUB1lIFyQjkov_7L5B z)FVAkzrc-QMwdtG5mZ@a4^j#qTSnE9v>akUnTs_DQE`_?nR+`T`X-8 ztWU7-bVPB}w-d)XIXm~-5CaxIUVAf{QQtHp`zm0!`TL|=1H?~8rg+l<#w+VB{QT9% z&25pYi!A8Az?VOK>ZHgOcvDw_YOGvee1x@X9E-R4wCwWl{<=#;^lzWB&P39>{CaTZ!>`}msekhrK)?R?A87%8%ipyCz~DdrE!h9!w_yI)RdcTGZ2xJr zS-08hko-qjQnN8}f^oHt-P4hz?>Q2Ng9g?f_r$+l2h`p{GOre}xStlU<}ls7HxW9_ zZT_yuPvFsC^1&yvwLau>jkxVN@tApRsSk~5+z~fxBhg@Y@%~`jz?k?!j4Z#?^RItz zMVJsZ)8A_ccFT(D&o{6S@BcNHB9O}bJKdWW{`1fOFI5qf%+Lx{Q3|PhM)iybkysKyl>Sn^r|M|1w z=&QGNV9PZ{*y1gVvsb=SSg>(De7XPfvxAl?M*lH|xN1c$2h8TFZL7Z@CnCKbv~E4` zY6jGqfBht)VbZ8OT4aE^?~}%p`t}*(gNT+PFNTZ0=3Q0wf$Gq~XA;vWb1y9X1gM8B zW~C^@bp9uwZ5@4^%SKC>L$<2Lhe%&qE&zQwH4_dq0C`ZuUJOtR_CdfmEt5vq(vlTW zqS<-aZ>YwBn%3Un7hL0@f)zzeeWM!!OuObBQ`29)v3Yp8-(I-}D~eE8%=<5Yr^(dDS6+iUcR(wnK`Yx0@e9mBquAo5RN~9>e3o(7 z7dL%@iF0MsH<8mug&q_e)%oD{I8>|vC>CnWky!&^vSJVF6c?igqX0|qUs?ug1_!17cLRGYh}lDQ~jFKgTft;q?NdM*zp+S1lcII00OGgBWwC#=lU z+o?L%VWz#5N#GJJ8f?Pp7V-1yLmVkE*LCy5UTi_2l4wKx9yUz63U>9-}4&E;3HK=0WxHC2$gT`xUH^gV8e8Z zn`d^0S@;6Cjvgj_2jimxCb$G_KOHc^BT&os@~;j`Xq3IFbVlVLjJRo3+VR8O=ZJlZ zLP61>**!|2=q%75b+ClMdYXFk9-DJ^WN&;ZI;d%-(pmPlC=3?xsP(a!AuTIO>>ZyX zXvOahdD-~==g(-cMbm`(l{xHX zh7AT$tNp=ibufsVCLB}!_(U9a9Ua9FovMcyVi)(ovoa-8>{w~NOHIUC!MdcNZDzdD 
zH$eyUu7V>07M-X1?^C|#ZxIvbzVN@<$^L)F{q%o(@&7AvSR}Z&5{pWu)9%5EU?BDofO$3Uu-0Ij$)6ri2swL!B_?7AgNCgg$6FA8xNX2d+ z{PXdUiKr}@LcBB4x!;oVKQ)_GGH2ovS?R{Cjf3~lekYw)sT;@;BCDxS>NJ%9EW*4{ zq&hV&^ekjbo&IiBLS4DRvi0cvhyQM+IFPf;+~=X*ZUTr!3r-$wXbV(;>hnu67J8_(xOj$7$rwbb>iz>~w8x{bS{yTN? zh~gng+|JkHi5%C3t#s#hHzzT3bpBa$Yt~kPgtoDvrL*?S88ZUaQV?-c%mexKf2D+a ze_h$zl-DU;TrD#;{7H^EC#x01s%zVCSo|^l09&SrXHtY4my0FhW_Vu!2~t}jX3g!D z$k@M(II_r^+#$iU1ZkDSSycGWEXQ}mRb|${SB-ln$3y>#wD9Wd+W}$0*zwDgE*m=q zfg#Uc|I<`bC7ghyD*Rk>1S=ViPyUzCChyR*c6$raT>-p1e=p&>2U#3+F8lKOQpcQ# z(-(KPtUk}J{pk%1rj+{7-}B*2=8#}T<&X5u;m#0tXBYOy*iqqUe}9yoSs|=Drf&Eu z!cE;w<$T`4TwJw=lC@fquzHffU^n?8%3z7($KRiul_tG^x>wFHqlGtTk5~Ova^|j< gAW0VQLzjtj*vh+uE@m{xeCX>JqS7Kc!a5%R4_Ij(bN~PV literal 0 HcmV?d00001 diff --git a/docs/core_docs/static/img/rag_retrieval_generation.png b/docs/core_docs/static/img/rag_retrieval_generation.png new file mode 100644 index 0000000000000000000000000000000000000000..1cd2956f78a51b7ea2b92e693780fc5912edd8e1 GIT binary patch literal 59778 zcmeFZXH-+!8!vnu$3BP-h$sS<5k){?q)8tOsDK~{NJkK9(xiqKEGSY{6p*e+QG^(( zbW|V|0jZ$|rAm#Vg^-ZEd!MK?pWb!Xx*zX9YfZ^HyFB&xl)b|Por~%mTla275QO9W zxih*5vSk!OHgIj;2%fwue7ha|XN&7OBXYzH13CxfiSRQ;Xb)~RA&y()0 z$f}ULpT}#1x$C{sqf{HBa4?D-dg8aAbC8w?Gk!)q77yYW6>AFJ6?@a{F5fP1+>G3Mz@=_rY+Gn; zTGYK)K0gN|ow0`bZzey|z2EprU(`FDm{GZ_kkEaPm-FX{pw;M&-8o5ZMT;$RTA77; z%K@_**yq$?z6!zSbw39oORrAm%@9nsPVj%544mF{sG=j}PC2#EjG15*vu6HlSoPh9 zgEHx!7xzt))?^QrEQQ=Im)fHp{!2>LXQFM~n$8^=vmtxlDcDpTXF~;s_T!_eW7Pfq z>zYC`izMvg{#v^wuFU@*yw44%Gj3`>#~{@;g(^+)1~WtWxvuEJ)UVf4;_#NVa4X8U zG%Lklqe9AGX*dlwIJ>s*Cl7uk1m;t%fMNgqG~8wuIbZph`zEQ)t1GE?CW-#9J!Cz! 
z;SeNMjnK#(6@filFyO7qA3rA|Uw*G0F2l+UO!aZyhU`1*i~V|+IB@&h$<=4i4GrGB zNs{!Y-G($Bs9*ncK#*Q1|g@v@nzRB6x-M=_Nn%bzi0_M zVGw?$`?KGF<(L0NXjE+TuQ^|CjEekcLjGU>5%sYhqra9?-L-lC*QC^~zhi!V(b;hO zUz5rD?Vo>%zISAB8j#yVeRbn|WIFDdiU%ucW2Gi*A%v+Ok~a~JEoHT&6GZ$X z?-0+{x(mlXReHp56A{Q}$keuRapV7en50s%LFV=p75&dNSb_UEja#o+6G5 zdpGZS)tW}DOIBmQjoYw>C_URm=972XBJDjP+sAoLw&k8bzL9>-?d62)K4t%Rix;S~ z{d%j%)O~$@RlJX~{I$t$+AJsP>|nysHF9#rS}~fGSpJpA&Ip1eV~0!51C2^J83?pU zQ`Nn5Dkg->6WRy%O00%T{x$ZEGvBJdaBZETQK!_7GaT1-YD4L<0@mm33`2wN4OCs> z8Pg{@$F&RdFL`zBiQFHysMx)H_0gZQ3-xDuiBLA^3hM;rg?)lS| z*NrwyEoR#uIb-mVV3GdqH6v%X%38Qs;ec+ozu7JqZ@h|B*nyp$s`cKFsYTq192~Sp z0sHSVYeQjH_{HSw=_xPeB&9;L!XGtOwtQ**^kwayB0fZqp~+F%y8f2E(dDNcmBQ&0 z`_`v~H428zPsoqrn{Z-LnoUFfTzB3dpz_Us`KWf)#zGF;;~i$x_NAKs8Qb)Rp?rtz zRXbu(Ue_ogD?)$Ykath_n-L{HvGzQf%Ld>7W#S^UuMsD>YWV2usphW&2R7VU|BsFE z#pYvJ!5F2bcK0_vC-R}vOSO2^w4wD_oqf3A`y z>9N}X(8Yf&_IMO)ql;Ibu+uKtV|0N(c1Lf&dt*!`;nCAgHcwmAJS0b@4b>d(;9iNl z?)#JP`e|05E_2}$&5v@;&DH=zJc=c1GXmsR?%uCVoNq|o#&=E`#1E}Y|+iTxr8 za+u+0`LDP=M!l`(V;w4#lBecf4>(Phjp^$@W`%R5F)ErG1NnAuO0k`s$W-JBZ^Y>@ z$KHKU&1N~i+>dkjvq&wjSco8v$j;mQ_50ztrqPU)D;Q*!mfSBNcy-veG=D7P9^g3R zW6L+luM#LqIvFSxDX7p#7cVPYjik43QjUGudRTo`d=q)d+XS&3wsUrV5EB#APWxn( ze}Ry{8fDv($p0$O(fQe4v5MjpX+Hza*$QoUvii_OqxFXa?poSAQ?}Ri1w0}xE4KHS z*=h%tuSLk-^@SO*>flb0BPXUyO**f>DQS~;x~@!dUj4LuVk=o!C-%2e?hgLR^Ra^% z%h(iZV8|pAgl}&b98YiJrlee^TE-*OTf1q+b!n+}iN33O}{A`$?xWe)9-ihWG9PWxszu;tgm`7Jk zbAt8SY9Wogf@pz7*Fx$-0flj{?R)!-*4CLD|I^>o)@>bg`A`fh#cX1 zb29(#@mr+FR|@Pd2~0oLZk|@yw|OzoXF8{NzN?6@8`mVR)rxZ5au`Hyi32Z}@((?` zExY5(Z10kuE9zbhg^6zk=gUc3)w*uTqZHBO0M z&ctJ$PCG@*l6;gn7nA+xw`4m(*9f`mr7XyZ!%vz6U4 z8+`RLi_TPb%J4D#VxofK)KTzIe9K1dV7y9IEiv(PS89*NQL6YsyDbkDFd(g5_I zswY>!Y;p-16EUa>E$EhMuPL{}Cd8&k6j8d9mGJf*8Wh7JX3;CO>a7;u0$7WGlJPZt zGx?f`z204K%c00`{!VF-lGjiKeI<&k!?M(p}_6`iI$ z(WnIjT%cmI&+vnlubfzrNFqQBLcn0^KKbROYppY{eC#!}9ZuzSx7Q^_$dY(v2V74l zdXq3TPov82qBx7W-uxq#OF1rMw@}qcNJ3$_AJ0j5B46Rn?>jDkphlzPd%JvJK}@$4 zsBsW0X=e9hC#O`jboYJ;=t 
zBBX@LkqUG5BjtZ-HBRe=7nAz9#48p*`a2{`pcX2$5%NEEao2EqVL^XUt}F)-zMEJu zMkB_Q$9sT+XZT2b3hNxN9mzH%6jypyv8PQXf(?14Pk}vzUg&2V)Do1v)Ggv-bC0gQ z;*C;azI%Y`Scd>CPPHpZv_`|xo;ueTbKghlei`TtO&M)na4x6Ry}Qw1^GN#It`v={ zq%~?n9GWRCsevBH<>ptXpbo-YocKmo@M*v+xf*O@&3L>?ZtA8*9`==$GOPXN1>MUO zf?ybZj%ytHt!f3Ve{8ivz(QoPoVOJVEeZ<_nDblbUtH1LM5`&M&y{JjcNu{!$XP=c z3Z(D%EwN(5QteKfplK$|%j}!kkk=)(G!0qbO zCCIjc$nF@gD74j9T<)=^)qo|zG=CV=4t2|w2+NO)MMTmx((w3K!|rAB z#dSqF(`gf~hG5j8;>EnC;Y=)8rv)>yWcFyuth|t(^N!4Y98x4C~p1GS}Td!|Ut~GI~*o9qG$L_;bEE_{AkU}jW%*6IZ zBlnTVhly&_1xCPtc8F`$D7^Jn2k*P0Ka`ivOSoL*_QQ_3nUe(r9B+`L2FturRp=zZ z&ELkGY;~hNekxfu`yzBY3gAzZiYb?$mlH5POAlz)ZVS!ApUOn?ZhYwzE#>qioaR*K zrjooZ*d(Y!ecB}saPu$3jEgul5jzAxh9Pg^Cn;a-6h1Y(?S-ogbl zbMhr)Yd>-YwT>$2Ckht{>VtrMq20Hy)^K2lp7GXQMDoE8nkJ_^Lr-@*3F z&XtoH{#NGkiLuhxc2=;ikw1Sujd?$1DX0Uk4qH9#11x3y$5 z9ASL7fr&Pdp|mrTL8IUTDJN(J<@NI_y3clFX|dFCLCAS^$&vc?L0jzJz4B4_Ew%1g z$j-^!0DN?Ou8bV1?a!EF=38AeIz7`@A)?H}bfE3UFQ#0h>#BdhnfJ;ksb2<~n^x_5 zFvp=K@gyZ*h2A3KAhlGqVD4^<-hu}6=U>lGI)i#OeNbUj%F;#Qlx$>kk zlTv2glDM#nYnT_tR4vf3w#F!KC9aQ--{u}Df&`U5%@`IlqkNTPeW-2wEb+=CAU=fJ zeveFe^wA8;=swZhPb`_%Ulq~a-5sZb4Fq|qj$MRc#aa<#r8a^O1#(^`q0x1!{0hC7 z%e7i@a}@)8z@^t#Ki_wb)E@1aAAgeDUqR1VnH{2)+oJ#@aX}MME#C>8EXFjz-Bbc= zUwgS6K!){5srpDy+ef%{vA;3t1eR~xc&qqXOiqs82zfBjWS_i;oQzBFpHRfAo&bGQ zTQpOoWpm5NyW?c6UANXfN}lU^H|K{RS<(WFXzsheojj(Cjjw}(=#*S(ya`qg2KYu4gCB&jd)JjehQh+Et4@1m@V{w zini`uSE1uXZJU;xT^g{H+rptQ4$hikPAP=zYq)1Cn2NYNpAiNz@nT9ohle2?U?*v!?tmC zlm0|T={E85{RX+F&j`LRP!Q*Z=|MPGtf=D70$$(5y?b@SFzV=D8+r*v2tqGCn>6Yt3 zz)@#buhZ_aXi7YkMSqcJKslp5-UpyB(5j`Br=UEyW49ugd*65ssCpj8m%m#X4zuox zkge%(>^|d7eGZvIjzOo{==sTFZy2sza_?TDu?a$yU|6h-B98*8hZ~HTxDYyTphCH( ze1P02a=aDiC{|GCDS~UZR=R|u$7K*$RU=BSH8jQ*yBbY0&S`t!@R?F@`(8ZLZ{qWVS!-IqOhbJ?MHBSxAcAmKYDdeb{DQ0Y}t-CwM0?od-oJ*7mEVyF_FNi?J;2+X0MlFbkbaHqRG|qU3(FFi~)?3%^o0Rp+xhY5StA zOaY3(@&(9Ls(I}|=BW{jl|wZNd~_tZDnM@gO#e!OSU;P-PyfT zy(BUs%+@2XT9+UA^-5TYNiBHJtcEU-?=IGK9IovXRwk1GUqqd2)ig-!WIGhhJq9L(t$B zCjkWS8u^k}Jm8OS%J0O@eU`6E0(;VtHOGu~DOQ^lI_OJjCGQJ975~KBOp*0r8y?GI 
zLPmppVJLOCe8)>Ju@wkSQ{N84v#QehuY-Ms)594F2DPIN5tt|59k2Nork5X}yJfjU zJheFzMgiTBy9(1m+p6d9!|`5#6Tw|VtNx$W?J!FR{E z2A{${vDHOgWl0015NThoK}M_3bPNviYbzBBd)KAZ14vMQHwpjtVBNIx zJM^4gBHXOZtG0?Z;GzaV<0#LezP`R;Z`NQ{#4|<}?}-Gwd;fD?#VcxF3!MT9Mn9({a7FeV1S)ipwBi`+Nma z?%N|6^{X3Z`-`i9$R*>2sAn;TW*8fP7QV}ErUwsq4x+%E%r>heeMeJnmC|0mgn77m z@%Zy^CkJ+c0p-QqW3&BLD$1^FjrDn=*IN>IlCi8%pD*vpXuJ+rhZL+At}C1Bi&E=& z62mZsR|{OPLnG{|@bj+BlbK$VcI^IS=aRyuvhuzI_12LHAkT{(s^PNf)W1&#GOj?t zvjVXu=)~f@2yQ&q(9H8}Af};q_kb$ITf2OPMUTuqAdGgvpEV|6iky^W0%uLpKB_RSZp>O- zO;`$K%xJTsulO|7ve|-WqP9QSN4TEvp;h@Jc&NG*usp<2o9Qc52(2LKnV5{Yv-)vr zetu|ULp29xsD5@Jrd#MMShnU8YvDbSv@C#0fwTEl0%cxvjhUl`#f7!xwK?(`0Nvm< zqACWb2}>1o56_PkKWTxr2*{#gmYwD}k!wbw4Xq8)FXn|TFQ24nRIOB@y6Vh3-yD3h zY8O(yW$zK$+PrD!C}FW+hGMNxYYyhi6N>H z_&gruo+N-dyXh-e9zn@7x2n;E?}*-Y9}xZy?`S|HLc8i?A~qSB{1S3sogN) z34t;CA5xH$2~mB*5oUKm_=Qc_l@zOq+`*l-A>FTXDAOtQLOUlX(CPrrei&Hf(GHM6 zYQ($|O%eo!A}53}UX#JWK}vSYnyCi}>>!2=EHAG#Td;1xNvdMcI!)siOx7W1Mn*HP zcn!`J7%INqK(#9 z2PjPST3aSF%x4BF6gZl~PB}U<>L!#y^#Ce6bdDW3=^K9SsA4Ry(%!)jj2gyY2NQ2^ zZ^p6<_CU(RSkuz)k5@`k)?UF$L3pWGB@z?Gzac_d8^UrQJex3kC@2WhaT&#>Bst$v zS1?A0zC7D6Xf`TA$}d$+ts ztl?7eIR-h0+q&k)J=|1%_pw1&#(o#o_@6w^kd$F_Z{ui%5sGlgQM!1*Jrt8i#_$-(;dmLKB08}j1X zb?c*0XP+>H&c1XwCwHxnShUVPXd=>EKkJ^dR;<=Ui=!Q>8XK( zeyrk=2S<{pR7C1q)j>yR;TiywF)iL8^-Jn62K@kJ$-#3`KKoPV_$%R2LpGjhxsj#~7Bx#yc62WA6ctS} zfecExy$KxNM5&3^R*zXqVl|4hPH}HFK+^yT*p}_gscX=}uY~elm@w!9$$_R~6>Agr ziFT97vOueoOPIE?8CV6WJ__*~M&%WjY2L ziSk$j*r1C763e%U*bt zfU3atn3>ri#iDXC^N~$}-n{ZsG)rQ<#R>IMITJujT?Oz_)X20Y&0D|b!abug-^!wD zN|#!flCBvsrhYK`4mwrsG}vqdgC+hnDX!c{C9!7PX*gyhyKRuj0W>$#_d>))lpi+4*iAWv z_Z7~6sWmM-TOm(0ti-*4n9F+z=JAMY23*<%v=^+w`C2;-w1%KmE~Ryw+2K*1j5c$S%;^Mbpa^LIe?sLrcSihU=#tfYKxmdY zo<8XVx^pzjqh#j}3V1AM4SHc%TC_?;eV)H3=#o{Q_$5R$xONgS{9PTCqN?Q=lQ0EH zd`d86?eEUP9#^?2;z*{As=%AXh#Y$`IB^ik81V!R5%BEGjexOii&pV`(cz*h`lELq zUk{_GmU$Y0(!2Hn-Fjlz_P3}#JDlg5@6Ped^ux$S`>TeHuQp>Cf%*S7kHLdB* zKi<`p8#&gLi|)!R48^M!A4!zN+D{I6z^1@!F|7ta21W2pH$A(DKAo(+b_P5}s?R|k 
z%K{iRkeA^HRua@UU#mi^n&%Za!q2?EQpuQt!-5_`pg#U`61I{xCNf}^AYtq^)3b+D zY4IG(h==MwFNjv^;dl3Q2D!aQ_iRa5p-&MHHARKut!Q0%|7XHz8eA?5$dhhm4=Dtl zq4)O%CM`_)DB8+47tp$a4BEe{%_mTL@DH^cnpVe0#Vi6nm4#6|zIYbA*3#&pWVGH8 zPfCEeR$cN~yTR}KSB9JohU!0c1WE?^F)rI76Wo-sW-FG+9467}+w(*XM9UhVK*=B% zLFu8YiVpsRCaDED_s~V$n&b5XCFv4 z0#nEm25E|&$hLXd6#%n!ww0Eaj)+%cEQZeWP_)T30?NV|#GNOoe}Rs)Jqm!*W8%zt zrsAhCtD1Qm!)K^_scwMsuz^nNW^4#!LE&GuK2bS=2US|~LJ|xDBp`{FLBM9>+rXJ6 zywWssNNMm#>JVhSP<@AUBDU75CW^G&c9h!~x;^qITr_R=wuT1bM^2P=7M0O-N?83t z<868)SQ;K4*!&p!yd+%*pSz46JOT=)@PrgiC3CVmPqx7z3epcj=9;wyR1YF39I;P( z5Qi>r0fw`Orzcflmv!3*4bTc;3kouScYe}GL7^DtnVosuBLNfdt0MhfoN$5_vt$^F z5wIk$J4CDt8V6uvy#vKw ze~ngIdd84`st`+7FU9(9KU6Y1xp%IDzSug*W)Ji#lBvGh2(JPVWW7pzX`g+E@yK$&%3c=BVqiCks^-|U%hQcz5Z?gMFIH^k)CsrM@(y3{EWHVpq zG08zsk=Uw<1^%hrDy+#QUNCTx!ev!HdLvmDDFgsjcD=TXMrqDgK8juuj{EZF%?3-& zW{0vY4}Va}qeZTSdHR>BY}QiLd{!8R68Q2JW{hi{i&C?&7bfV+@~Xg|ua~PkBu&3; zQk`9Cxr623#OXy9u)+KHcHzIZ2Se+vuCX~KfLKr6#sC8XL3Sh38PDxF=u3}ESzNH+ zYa0L7K)@z|!l@!CI9btp?$m3K1a0iNyQJ=>`5%Fl`blBFX;Ls znDsh*z_+XxVsdCt%0hIFjK^_V!1{X@*%AT`X&`GL&gwxxe$_JX_c496C~L|l6+S; zf%?u!@8?(Uv0^1sLU}Aky0~E?ah?vXbJsfa_{Xw;YWP{`(WduK0$`tN?epXtwlp+0 zF$uR1K4qHE4kMOU0NW(4L)hjg7v%B;xgF2BDQ3|SFO;KoP536CHd&okQ);48?r^4+ z8qM-?l{a5yMtQszK$a$NZDPr{-vu`gK+DS+y~8df1aQFG>RYKX7dR!uFLvRu`5uaI zZ?06Bde6?c<98Jx{w&IogIe`rqQm4tChQY~YYYJGtK;5+&3)bfAq<4Rav1CQD)geD zrI4|9j%(DxXf-gYrLT{BB%Mq{rXAK@Df2q*;VpPU$(*~ruWwfqdlArys*a26^13wX zLoWiyX(NNStffqm7D3D+C9Xb@IWhN2PLPvpw6V%ImSfA-`=!&7?=4$PjggHEEY{71 zyQ8JgT*Wtm9T2%LS%Z@-8PHF%yK$p2s-v`Tu9yl?!H}3^Dv-jQpk96>zV=naL~Wm) zR)Rr@Hs=H4MIoi@Irp7DRtR#ctgd8jaP0CYw?8Sn_4e;tAK_AZBeC-2jMM$f$_{<) zA$Me>QknB<#^snXg6z=Z`?%$lt!{4}t+bnA#E!EAaF*x}i>OHIOdk)T)6{O($$;O_YFDt zMgvCG4fNk+uT7nQQK7d7Zm)JcCnvz2&?1L*hE;Xji(t@BTw=Q-I3WF!n4p|r;ZdIN z?ue-7h(zHo9u5XDvNY6msKD2tm8=dRbH`}9u#d2W=5zBB?kSF*mG+oYR^WP9-^zt* z?~zPqgZ4N<7Pp!T`Z=&o3j(a`MJ*5gvDyjE%?8#sX5M0 z+sSoMM?Avo{9?PinT*y!z4%304=1O}#%1f`r!B45lSgcx%h6<;dXlk@bFB(d$x|(>c0(azQS`-V@g3Le;fqd&^1 
zk04Sk@G@0`vK|?axvk>`XLV=Uv39FQm@dRbwe;IhzZJ&RvyDJ{p zwU4Za+0+r>IaUX=W2qyLSQH-t-tFfc9izF&IW$`|Ag--LKGK-h*=bj1OicV1v}9o% zM7TijF3A15SO3{`YtZst+JDwW8@I>QXtg0_rRHwRV?$1|x-UP3qDe)qh9>bQ0R5W8 zuHST68@s!O_$P`9!)Ld_2p{y|A3(=RJ%Q2L-Zfv)>|Pm;tu&RtNa?MXDtz$ToD?-b z=?&mal9U-}gprGDj!mju|{=230wzLR>F?a!4?g2fbqx*xq;RZe_xBWp4Oq@IO&iG+BIR=-*ZwcgkXlbUD;bHXVL8SslOE6^&KimQ@3Zj|@83aP6^T1Ls`EOcqEj}|IE7L|1sS48W~ zRFgT3V#NA4jlmX-&PVCm#NFbGMlM9BoV(PASx zX9jnIM-rD9&DPp4d2oi3JmTi4BXq*LB+UUHr;W_z2K;DxS-t=fAmHUe@us|Ym@as8 zf~_uz*NvGYo>djH-~rgQ0OPoCW{Muwk)b7WhCrJ|$F0F|_qy>2+l)cxaVCGccPV`Y zT)8*^rXg7t06Wz5r-2%*0UrR2Lz2ykpOaI0@jh+o|1RP1qXu6AXN`;d=3tQ*X9sT6 z$sWM<(A}EnZT>S9QpGQYmQD}sJH~U|zFQhzGpeCY6sxO~Vabau!3dc3OxfLe(ubUj zwtR(NH=CHZfRZ%K(LfZfv8KRv#Gp%hpce>c+)R*hY7h^rwGuTiKr{9%ZAkc8(5T5+ z4EzfmphnCUZ-B*}rfF;L;6sb+6@p)gh!m49a-PGn|{IE?7 z9$CKd^BP?Bw{f~=WSVbf1fC!|Saj6<)50HFyiloDLeaP+I4_F1Vgf4=omep9vKctO zZ0qTH0{f#&R2hfwlEO@`+b*^H)!dn+xunZC) z!$l))?S@jWmcvPqgIS)r4-ybOB|B&8t-;< zvIhhZ@L+K~L1A@!Xze!;pEVN<(A;VYLiy{wTqu=`v(2*JYJDaqGfK)Ve@C=aH}wF% zf4zAgWavODTsQji3O&k4b^rU;!P#DnUr@yhZX0xg*4HN`fTLhv+TL0CNo16uiUR;|1o`8Tg;9dgsExhN><91_v`As) z7B6h`k2&>y0Q7>yZ=lKv5p9ZUt*S=X4e|y3*i;8lCX4b5vZY)7faDLV7XBGJtGUB? 
znMP-j=Ojbt*~fO2&Z zcOu0Do8t+?U!$4a@K{k9&t5)XgIN*fr0 zT+Cf)=BM2)zdYudr~;wwhg4qppCT#1dy(+!3n*K_HLM+DP~M%K>@|8W%g_Cg?kj$O zy5-Qra;6I0RRV^HtkChiEf{de2yp8+4LWcKo7x3`5CQMy>Px=5af$@aLcs*&qLcd8 z`OpgNVqoG>8`=*F#{%5d|15`*s8FkP#h}Izd z+-GWPjxUPn*e8X`P*n+)ScHv;MW~_y7+4C4cELcumH<9n#lv)dhwZR*5vffoIww6=(0R|7OAz| zFlS4Ti0HOfbz|(i&O46k{DZqs%$x)MO7zfA2eN#N+|8O+w zfocp6MK=QdS$kR0JYMS9!FO&`nexRCQ3c^p(H5)>{F`1vQ$7&KyW z8mTsNaCQa=0nKLORD3A4ax%Zh2XT$pjgt+i!?d-xqaci2j@<(6E?mJrPg_F+oY$Wq zDi%J(<;~6o_7|WHyr5%HpFwWvh2NDy-gy~ZoXx0kxh|y{&t2UE1Bnr+eE9j@p*aRo z;MLOVcMqkdM&GM~6L8QPJ(BZw*Vy}>^OEpDL=<}GjR>QKj{zK$N~BMzj_R~>+GiN-S+37OC~S$-EwC-h8ZBmr1UP<*#yp>#%p zCI@1E1QNgH@0;YhECusX3s?8U;e2p8st=1!H;#%oReez;K?&`3yiRh?6 zuj_nMAOkM`QZuBqBO1DEKc3VLw_?-K3GoJz7S4IC<+uRK;5MrZ0*Qx=uKln|=;DYp z;k7Zs>lZB~t>(VaiKso}fkvE~3&0b}Iw>ZWvknQb7=IO9zn3v`2oe?aXkPpBI?xRS z-F#F=b>_xty^6I@6nXV)8K?(LfkrRj%?kAXI%6`%#}PHdKI4SnuE6PaD(J51Za4Kn zzb)k=NP5mCzsw{)_OQ00AO&BOBU->)<2AQuEqm>;VY(W4)--?D0$s>A(!e=6sJ?z=8exAJaX&9SrlDw4Xtc>YdJyX&RrOQC=Lo*q0S zWVOG6Np+W5Ul!dvGcz~yDe*+>5rOO8UdL@)dTDR3TVN_~sWsGf?PaHq+`pk>KsXLZ zplts7Zexnd{nBQXSVT(4B!UEocM|Ftoy?7dBaOclCjdfP#MKT=d z4rlkX#r0W&?~cz_bi&qu*aA!aM|U3%TsYAmMGstPM}CalKd8i7jLg=5gi`iv*X|57 z)}`*KL-R-^qGPZh&8JnF_Neh7c5yP7P$T$l=Z^;urCxOxz}zrscnA9t)Kt*k|BEkn zsYOLaaFtZvpZtScGbTq-k?jBV$PE3nDyb4bh~y8M>|g+>ad|_en{hz zu0U%&O6!LowW)^##xrD7$ztx+g^NQgO8f7+U5?^mnf zg>{^fvcJ#!Ks|Qt=(s=Qg;wqmwvUAW{yOYM^2WYcgGZz?M{i|0^TDe_J~jk%&ZR-+>!{Ar;~>0(H1 zX7UccB3?mmjjh80-|wn~aVGnCEKAnrT~Fz#@Lp$p(dAEV71Wv}p*5-ADlZ)=_@?+R zF<#oC%~aQ9FnFRRB1y<>Q3o)0J%w}2 zh*JSkpo~ynlDSDW+g@Z%gs!TMGDBj|UvrM)`rUVRrGAE`pY^$WllDq}kA3^{Ke&TQ zLsHjQ$BOxPmCwzb%-lA%PAs#mjCB~QoF)XBIvIOPY0>cI=k7N33h{5aqZ35G;=i9O zrjFf6)a~z%QI95!2JaC-m$?x%0`{LpS%5r0)o8C*Nv_*oOxA*fr%LFJkt^Q zd|_4Xduar$d60UgTfl4TC2u7+H~HB)0+ZOVzG;DGMy)H}lyKz9rh>;Lx56LBstS$u zre>C{D`vaX+WR>d4jFOW1hvb_n{y)^8hXPtM-gTDKsb=x{^HW&eS&`+4=#G_=&~|V zAH8Lyc+19E!h&O`$CB<*@n=mYsckXJ|7`OhyZ%`n(>V zHzRCk{yeX1H$795f5+-(V3=6&{BcE>i^!;fa0oGpN#uu`A5DNxuNoD7QBnT5qPMe- 
zCg3u_r~3hgrqQ}3NDiowF^ew@-B)Ns7&lUHl=n9)Bc%i8H z0_E!4>0cb*d^KB$g#_s!D74$$EkY>;)4WS9=&OR@#U z8}#p6b!r{f5lxJa{szq|sCh@+f_% z0`mc{bpAj|(f@bW_3M{Wh2hcga{2r(^5YG>Cr_b2h)ZRMqI;_UzPr?4cfL>!)@ca$ z#~$!+%X7MRA|<1t8~&llRn^9v!Zc(afA=>Mf7+y=@Bv#b!S^7+SI<4qNfMgf!5zI| zazpq@@xqhy>puUHcd2l2BYU~vpHOUvcJ7YyKt935d-l^b3zIA~&K%wNvP1cA#?lAcRt*kw+Z;oTImpE5BVzQ=AbY{5H<|nMW_h5H587iVE zASDYIIk)wTmj6g34og~a8w!FTTTr^75@5vhuBq>P`z~P*Vv*Zrr^Ss6eJ{8_Pq#38 zeWGvBTXI;2XfCw>2rwhwNBl>s0-~qA0_cH|{>2MdOE(bxXO~1u%PcfTJY$u??K07T zhs*}&_I#x>D;vhz2Q1pKm=U6bMGSPUxRO60=g-b)zDNMSypKagus}Rf;?vxA_Nz_u>99m#DF(6sQRV zoppfk4L@#FOJU|u>*Aij!P6k~R|m**f{G7qzUxYm3)RYXC2%=n$!1Jb#+o#X7fu|F z0%mdAz-eKy2X9h0YDR@Z`r?^VHm`v%b@U`3aiXkd^}U>!Z`#JXq<~7}A_#~Qe{~g& zJk<X7wSz+v>kAKYop zgDG8Bj<~HD3d`KLAngD6WUYhX_t2WE@=mHQYDj46Ysu3p{_m^(K0wYcP5%+EHj=Us5;R^oRlD2j<303P8Bw)i z$1rq?D#wos3x50m`@&AWNPgefNrLR@g+Q_it>4F^H%g`?F!Q`Z4YOl8)Yl;T^3=nLD|C5dAsrSOs{1x5x|Ei{gEBwE)EPm$6`Cl16+zH|jY3QaDKZ+@{vk!SGF)?5jhLQ+xS6we6!fgWfT?Lygf ze|2^?aU1TBsS%LWmmHt9KiuLH&_!ot#FSStrnNUM<&U|<)XHcNply0YVlT{!QW55K zX4Mx@#0J=A<_{9{UbHi-}ZnXMyMVb z2qaYW+I4JeP2y9~>Qm0jQ421-0L}@zzAeymYQ1ann+)qF#1)QsB`XSNRpbP^*=nwQ zy=~{`>4|hU)kH?d$+`K@s#H7qcf8_&SgEkk6Y>FpozNKb|TN%E!bxmXV z*ZCsJ7$GySIxcPHLGque#@&a3SliHoVZ&oJrxx0Z?eL*Y7EW$8>-3#R8iWFpwWjq6 zIuMz`iKLrBc?oKqhw(X1F19`2GEuIxZF8W7`^$D=_KWdm5&{TZIa<(kU54 z(s!&Cs5%LtVvm=8Jqub9mab;m#A}DF1OsLWM(48k+%^t4S#_vmW<&hU=K(PpEzC4T;5)E(E>nXhQ_>@F3mMZWMuiXJu9^;>7+z1u$cxmsAVZP$9lrh!jp=yz>8} z>dV8S?4$QdQY4j0B1=h;Y}v9ey{Sy1v1Q-N&LsOXwxW`?(uiy^WSwkd8)Hd`?7Lx< zHO4l!VFvU2%=>Az zM?OCy)oW1%r5{o-gC4R@tl+bH_m6IfTn;3}(E}XIxt^yKqEG-+CC;WD4$Hm02=- z_C0a3yH=L+-|^_5{r`MJqPAuTjYfd1sW1Pw*?LlJc&#g@($)Hnz#-M1@#L>sLxF!! 
za^e4rmqZrSqXckngcJqCqaZ<`;UH2sub(|;XIb!Xy!1NJu|O{bRK2iy$0y=lt6076 zu2#){ZatMf!_R@OrrLM31cp(qW%O8xrQb#k18}Rt=C38(l`+3XE4wxKqU~60l9nX+ z{ZZ>|FMaY&>5pSrpbY1~7`wfgeolN9`h1}C*}B!6sD)q)q@`A2nS|VVdc^AgOXh*w zDUqbb#nRv!*sIbX5b%4|+%CiKA-j{@Flzn@J&=~}|J&U%w5P%T-l_hgsJon;Aoo&H zz2Ynp=2cGrb=&_kbs%D+NiX&U_qD`ICFWkxpP{cT|Mf_M3LvVE*U!FQXDe>3{UAxd zUN#k#x)Yx0RR)o7J3Bkv3LZpmW#6~lGQMs-m^(3iGV^=*r5@s{@~hi_OnZ z@(#B}t+JQm7G`{iR(E=tmZtxM^9+Z$n*!7oGloQSfqxa+;9qFQ_HLELJbs4CLnveH$@k+zRB*~*qj zJ~$K2&Xi)x9f{GH?m5*AvYVs|U!vOKiUFhklOq8de{z0L4x7U{&D|6wFux-F?U|x; zgT6=k&ka?U?vN$h#w#TqkqLhP^I$qvL~csHHBj?+OVAVDdj5;#c)UShWEd4(E}{8! zlJ9+XP}POL`pLn+guSpEuUP<+6#YtW9_Ygb->(q2=5g&J*^ zw{+P3d11#T-H-Tdq@lvB@I7m9cC05j-POV9`o=>+j`m{NXy5&p?lbIo@8f?o1)Q<& zE?vZ>z@JX$&+iDrcL92AdjnE53;#FkWjP=6Xuw7e_nTM98*ktD?6zDT5s*cgO#~mD z&jvK}c_$;zRyb#KisOiOlW2*gV*r0?no?SHcb>Auv-?9xEe zwHNbO6Ygez23eg{^R|aA`p=H?>Kg+`nvX`hDKh#;?LVD7BWH+tUD2zmj;wzeb!RG{ z5$xZ_V3?mnBbeLM2BQ>gn=z$B!}z1$@}=$DT6tR^zE*IPxx_Yq;NfFZWI6-i)jER` zBwQ$e_?jcbS8GRF-$L{?xry`ehMNC$-&h-H^g!^R3lvSxj#s+j%2l_|#&%?*z19#U zD>L2UCyTTypkt6ra|DILgJVagm@uI>D=n@7Y!AbLET zd%tf(12HJZ06WX;62!B`g3P300V##ryaya%D))#(lyvv+j9y<5Ked`v1Qo`aGJnSM z)!lKcI(S%maLV8%$tD@*O5mNs-d>j7D=w|{z29EX>B#>nHtt;Oi)m#FPzwGSoj)fAXc2rR#sL_3rFJK`%0&Cxm;o1Bw!C7l5!Q+k|Qm{_s3k9R_VNEtg+)ilEuF{ zfGA<%PndA>(caP`>SQy;t93}4MZ4Lxy?lI&)*pubCUGvq=gnoa(BBh}{raoyzIzNy z3esRw@W~8>DBA++o3utbsIMt79=*Dm9LsVIEAu)OtIG;I{z1FhwKly``LO+amnuSc z`+sP*SOqQfZ_cOl>l=!`^qmu4I9lJ-694(|JY&Vj!G|BTSzy8RYzLx@gIQLY2qS)z zcm#1)S*acKduk5|f1jfoiS(O{4eS>$4N0LnFm>JUV0KdCRKj#cXyyN8<5pc%#jNmF z4SZx#-1uX9joSd-HUH*}3*fJJ9cYqSDo)Z>2x@JnPfo)7Gl}%CaxIle z^yAI#TH@1FZX!$T+ss^TiMriA^pQnYI9w&uIY*;+NBQBKH+YZp8V4W7H!!2mEf~61 zUfRc>q;Pg*e~nXHLTH9+1@tQQ-@A9|^|0MPSGu}u2Lp?sv&KAg{`=ddhY6cUGL3RD zSf4h_^OJa8_Am!Pk;z7iS6xIhN3_P`mBJ`c_Whs3*daGN3t-$IJwN?Y*50jER&~mWqgc7IOGwHx1z2Fb6wJ7x!V| zjmxgDbZk|GT*A4k{)8!N1^ipvc&;;t5_WU>z7$VBKiYM9mcePh;9d9ihwWem`p%yP9iX`n*vYeFg%Eb_mT5`n)cp@h)uw9Brn z@7L^~AOKit5KJ6l*fSk3hA)AupBWjAgK;LwU+3wn{l|J(I3-q!U&99`0zbIa+QYOB 
zu>Rl>s6Rq6EQ;@2M?9#lD@O$3FE=uMj?Zc0oU^{1bvKlK=krFV2rB!Qy{_wYD-(Bw zEY9~JsGBV;kcXGS=cey67yVJ?);NsG_~s5)BuW_j)k7Ep-T@+gpIPCkKA_5x7D3Hz4g|G%UtQ99YrAhxG6qt8|Rpho0Qc-RpH3_C^Q$G-2E`}_s3BTeGbo!O9s?7Em_01}g?;uacZaUJ$kPi}( z=v3*m2!x58*#(EV0Bikv#2(enHU)>w$*ceEpaD2L7TgoSjwG7k$cxlfWtm>DW7FEOXZmN084YVb?BT=jP2wQs#X z{qpj4@NeyfpiT;ZU=g4Gn|=tz)!Dzc`L!=c^ob%`rBm7#>yo-qHma$ z*e}yymB_phr+65X5^tf~gj3vrM_cV9sV+a6WS9}}fO}#$zul@dG>igf-FAfP>)xe}ABN^U0WYp(j z9(Y}dHJDv1HA=Je+&jZ z(3EqZbN-C-RT04{)H}B^w99~G#RrOq3M}kWY}Ab}5FB;4O`X4u&(Grf_!8{66!T{E z@JGL`M9lf~HJ+ri?fEeBhdWM(k9h5kxEXSVwR zqZd$;V#>B(edd)e5n^xw?CsZ@!8kI=wnz1YMg3EcR5Jts;q~8h-mbKb!`^4C9*p+e z9x^fUq0*mt6N)k_71)5=qO5BI0fH@rS?9V}g2S7(jy_W^r7ILh$9J}b%)D8p68 z+oLGNeO<&I&yM>)_HkK?5Kqh6gSfhxgbUZ6Xkr22eL+xTJ`1PrY>#1j2tgv zZ9@`_wxyvnt_D()%uH{&K%`0+z?2E;xHblDbHARzx{z^aMtxU+@4HScXt-Vg50Mj` zEX14ur`KDMUP;lnrs3d`CgI5S=mO(fNHg?O9c*-Uxx6Pt+zqJ}h(HnhR|bIhtKK_f ztlW`B|)mqpi1Zx)`pdrc%sI10evSKYEPh*3IYR2&0UCn|f6-%P6-{wV}NNkc^ODySZ4#?R1<#?xu{#{cO@D{eZDaw%K=T@h;?QvZJM) zp;H^vSF|T*;P9k9Q|3pI+*A7FSc;vSU;a6`d?IrU)F9&Nju)*WX3s7~uEW?YU9x%a zxzrGOTMCo5szu$95dkmc=27^r40ZL5)xYY9P>td_gr(WgSW@f@U3=TY6Vt-Z6U$6~ zp<>Nh$;4Y)0fG|k$EV;tblFEWRN-yz>@DttkmcUydXlb8SskfLraih3?GIqRzx+Qn zep|+K`lXf}*fTLBFy+QA$vj(vqb%9R=kgUmssTO|%~VJjoL3linso33L=~6582B;# z!A|!t-kf=P<8@U*xmPbfZ{3o08s%PL4G%1V>_TG$hK_Ca?~X%$K^ecRlu0~-Ivcu^ zU+%~E9c$0tW}$3e%JjYB7?u+7efLD7BOfQ`4N2A9TFiL=YS>av)UUx0HMZY@eQgGJGirXutKsX;8mJSK__Kpc%k?$#ae4$6e`a+sz;Y^}%{Ezqy zim3|L+f+kFCks3KPHbZjYSaD}v6IeoC$Y6b!V2r(h|X)RZE9_gi|Y5E2v4E*J59=T z-x8|RwK}&*ecjqQLpES7Hp0I~rr`S@kISeV)6Te|Y^(xT4IDF)V)OYP1X@;2dFl<9 zA@hb6iDxSZ>m7YGjTu}dUE6s_Hf7q!n`q5UjW+ZPPUjIOVheQ{vEK#E# zPxakBm#rznJSkE8_X}e~dJ2i>Oskk^q0-$@7n0H~R)>e;^Uq8i(mNr$l zSWp_3FVBC})u0U(yl&VZVR-KFJ-tnvT6Tv~3I_6+?yP7GGQvMEZEHcAn8VTXvP>JR zlY7{NFLO8ULBMXUrJyG+`c0AV^-ko{`jqV{hikzro9B8Cv<1B$ql3{%f>_s;6_fFjy`?Yd>eiMOV>W~r9O7LVYz!wx;*nUh|DMl} zGiTtRMrpN(Pyx^G>eoiDOznwu)Z5PF@*}qEl$WFqlysG2kD{svZuPA+e07mggS4{Q zt$KM4R1G>7A-Rekt*V*F1}bm$-Y&=BbYPiMm9uk3XK+ZTQvnpK!1XL)qgces5LcIz 
zk)ufwNO9#p?16ZoJ}Pgt)czyRZIY@-U9EQJmi$z0)Z$D%mLP+UEoSDv)F92GeIpXS z(VO76EG;u#>naiZ^SxMnt}eR^Q~+_4Ysy#8_Y)@km(m^+sWQFoYzMEUsK(yxJ9BIN zRXu{VQ;0cSb(i}#LyCz@DfN}4=tz;wtVZ~ovgNYP0ai0vUJ`2)E+1VK^GCI^IP@sfq*&`Q74C_p&;$WU8N zl}+Ngz8kQ{ReIqS4;_;IX~a41dUaS>+il_~dw`;mS^);l(Y~Z5;j`xZm?7pvVrAp& zxl141kLV8j{8P$Xjcyk1Q=~jp+k-4Dr=IJ+NXi!; z5f#%|AZ^EsD%Xe5=6WtD8%vtI)jVxUVGw);<0Jq8nRltFdh4_32fyuRUc18x-Xmdl zV?HB46a7+M+o0Jl2Db_K1k_`bfqV_aTd{U^r7B2jUtYljpkDWL8?|mt2_)nQo{KRg z45J2bC_qGaeFqfaU9BUzk4j_Tr5yC*a?VuTaj>+@sf{E(Cni+69lGYLu9*}bX-|YC z@Nd5V!6K3SbRYZQ-J7)ZIoEg)GQ_MaPz$M+UHKYz348M%GDZjq8qf>AEae# z&|j6*fhAfm#-TzHuZVETLs&02@Cl&KTOViGnEMV0Dz=_f^X(M@D=^x-ztuDSKWWz4 zyTpnw4PmBCmy+1VR^+x1U3 zka99|d~g1ja>KeN_AK$N&`>>~Uig!a_zCx5AtXuK1(FHEY_orK94c^J@b1f;a5LaR z;w}}(Ts+^k{3|}a;{Jdc>kMM|7uzxRneEEI^P#aaAf-z%Z__i*i}6(Kz&h`S(`P`| zIb;_xw#Sz*q+1zruZ^7(E8nR-IGpX7nUzsHgS5a2uGTIzEmJnuEoxVRWn7aCMD))}Wllhtc1Xb*3xTi{b!(-|Qp$z_0yJ-V>px zukaRRAQ?Jn;%>*?5~lil?WEM!>GNxZ&#D5Q%XMzWM>xPKS*&u_==l_ae1`yf@ZR!0 z<_kgwA2bL}YbjPfo0hEY;BjVM7*a|m|A*mu_6ibr4t~WWM4}sOTkg4!dc+G_U-hVT z7L<%6@$VY7peg%^=)p{KdezYcnG|Sv;!SWL6Ctg@l}K=d1M^JPdZ7YG{spv!OGoN< zllt07Es|%W|KpLM!ESybZd0hed+Cqr$5bkk>1!Iq#-(D=cL!6?@z$aD0rj0D$mg){ zll#}T{GDgr1B)P;&??!>2 ze8scX?+XR^RLoirC{uQI3&grr@A@*O@#_{ex+zU_jrx{hLwv1EGhTJ)@eiWwP@D+| zQ*5#B$egF!O3NEgJ0nNO#%JeHeK(ld&UZJ+^4?F4c&32xB4v{ASKT*X_(OKh$fhH9nMGky!o?4@F=*Ndl8gcN@zMW$#^>?cxL9zdF;YAns2}h zPYpe%5$CO!Y(eVDXDAR(V2No@mhP&Hd64`~_lGa`N7W>!(DpWbANra6CVs2Vqcw=b zjiO=2dgh@hFB5eu^w93Hdw7!f*2PU(2YEi~yE)ai;2%;)5*DSEC@K%L4Fh#6RUDX9 z7j|QNs%jSkv^Q(Cp)v}NM|#B~C*{w#cAAm88*Z|xpPGO9>Fy41K&nRlf7 zR8_)8lIb@zT*iuWD{}kr5A?iaRq>Tc^~jz6uNF&rD?>wL#{jN`e=);WKeu@@|7*!C zKcxv2Wd8G(-hiIZ)3D-sTL;pFcKY&WcfQf9(9CyVVvEEhC%X4Xi*EVsm>Siq2Qmd$ zd7PYQN2msYayMpgsGK4%*N$4yp+5d+SUAeP@Py{!XS3lrq?W&Mz9SgUxiep)6zy-(6`c%=VsMvOVUF6goCkN%mS zzX1pDJDWp%w?uu7lcwkU@K~Lt@FF(d zw?Bg{FR)7Mj;_j|mrD|(sZ~$E9ci5;tG+|1Z1ch2+Q%yvO$YFFd&!xB-30QEM^4i* zwdrvWLD3k$jb*(jmug);-`Z@~guhSU+(f3QMlw(Vp7*+lXl^UJiUq`k5eer=p2(|m 
zhOa*pWazpkcWiF5>}e8>w_+-UddDi~Y6GMe`ls%>^To9a=Yh+5{r>m3OD-2dnZ|&Y zPulITE1R!@eB`|$_z{<`J(`O*=KTeo0hwMo^qwXb(yY8`pLZ^>s1OAGSxS3`j=ht> z-+?gX5sUmYXzx^6f;>WH@FUiK=XLeQ0Xp>XSHbISkKlt%$QBf@ro6+^m9%p$@+eiF zY$O!9KF@#Jv)w$fD*OX~1N?L;8X`+=eZ#aywK@F5q}tO!Y+p;Y*lBE}e##m76ld>p z^M-ny@QZrFa}$07)nAw`@bG<&&Hc4Dl)cJA*kKPc-ChL!9;}=IK9r}Ko&e+9I>X`d z3h)Ml7~U!$*~0GxuisZk4u0caWpmZm4nGlhM%)e+jgb%7hy$yVb3=`CRldsGzV3qa z;oW3xM%^2!TPkgz1XL9r(ze1i;i+1V&d^KW%hh~LRfaal3p`h^slkS3@(>xHnjKWl zdG>;S7>#&5Pj#`fN1^pTS?sCJw2?!x;T|Ru%%u5{tsKpsM^hse>xEP8+Lt&SGS^-- z#o5cXp~u{6BBQW>l&0IsZI*r^>I~WgSVe)3BhQmg%Ecf70d2kQ4sF5?7LfDVN`hAc z`rCeLWMLtr%9NIPb|t0rnHMT4J^le4);SF3sMF@}S{vu@k5B2ihdN(@uO10tK4m!c z!Y6+>`;x&o3HOm;pGOv(Nq3A`Hem^Czy0|1N%GDYiF4ME6C%k`WdfpqO)9jT0Vyn0 zZrk&z@9_O}j1>RbtoR90j)KXZt=z0CmXIO!3I#t(FH-<#+cq30*46VMym762X=>Kk zSzzGf4soLH^5M3+y}t9wHQHfgV^)PueCT1&IHe@{%OpqC;XhWwWrLU8xlrWmPfd~Z!;afQmj0qWa@ zG9`}d=lmY2^1v!dRPRDf?FsXSfCy0_&T>4V`m}yJ|4;e-@D@b@Rne&HlMCO-{M-Vo z0lK&OB!d?77M^zb54p_Gj3B|CQa;!kn7*xw|HA7Lqs+V+_9#L7`zc5u??Z0Mnx~|+ z(vg5Vjzu*V-3%@TYw{2Jc+3{t9IDZZVpijQa3L=(p_0frt6NBmsUk}39SVE7#1}sK z%7}q1)%}U$J$91J?tQSox5;wxT54-=?tGmKyEi`E%3fyRAGw>usNoY0pZEGqJ+~gc za=)mj>^~}6Nnjrg_M`f({lvIj+%H+An-(09#m%*{H+fHe=Hxw<%((*P!dOV5gDgn5 zoA1a}-Keq9B2PqZy!Alb~Yb4;YqoAL2Y*80mp*YOnk43_3FK6On(yr+hP(nnXnDBQ{I~xwYCk|K8 zhQXgXLjS(3o*PzvRT<9^JMW0y=MfQ6(7#vm4&om)MI|T{{lsz&X0>sFJb)Q`T(vr6vr9m%XFS2D`2|aIAr3P3pXdBIAeSoqptu)sC=E5I z!c~P^ivm=4Kggw6(tdo+115*gX0y4addXK8J#{W=*(_GG%NEE-F0Iy5r|harto(0d zg!%Yp?9r;e@%6blRbJgxv%O<5{tn`@8DEAHts{CtA@Z<0*++}(;*Bx~|AsZ7|3CI0 zy7Nen?yvl{w|ghm!g>v%xH?-yn&=om3}yM-+2g-g?qW1IRmLuyKpyJ}@g3BHgT^ss zqhKuK4QE%YodL)ik61hDHaO3v^E_R0|Flh0W9QEgG6Ygzf`U+D|Gw`_!P}19$2a8GY(+ zy>*Rjln=hlxJJDOvtgh22@6$B!xwKQ74G2HM0wSz zdgq$oEpC5&MLOKTyyg|aZ(&4ZlCAEtT83=4sM&j8+mUAtSvP-_R6#>6JZNgDu;x(N z3>=k?6!$AWq*>`-6BQxMH)u9W<)ax{MJtn&h!&Rk-!PL%LBqCPg@1&MSU<8uJ`%+u z|8$G+R;7#|Iv8`KQna2RUIZ=PI=Qp^lNtGIYL;|O&m%ZYdatf$3x4(N$_^o%R&w0)!u5pAV zCM%F{pF`_qyP$zRbx?=S`uQ)G-I(>;Iw3&hdr@ciTf?4p4sqD;Wkpq)B$w0)6#3)n 
zjtk~PhK-V>kk-y1``)*bxW^<))=~P4lnC77`AzUlE_H5J;6I>SE&qjQ&j|P8@4!#P zJyZ_AgVOf3hCQe|1_=4!OeH${jqx=nxC_MG(7c7xJq>r zZ#+V-dA}iLdp{uCiZ!oZ_TKtR88zs|Qv@H@mDV$`iHv_+48efQHhhdn2w#Qr$fSO( zI{$IaKK~CI?S(T@j2SUuolAPP23s!$+xQduS0sW2r}f~=2ZXxZA>69Z6)UO5O!uG$vIivnvVQjct2{$)*yN>G8ShkkH2QIAq%2M=>i#=1E@P~vJP&zP zh$-{x>crzTwD)UJd&C{=EAYo zMtgp$Ix}ui^9uLCRD1`|QH~2&8Zk9!4z78z1~_V`d1EFc!c%nOaV(@mOnoY;m3Zn( zBuS!Pox$l0pjSKpS{&4UMVTqA4mJM**AayFoxQI^vJpPtFjYD;;Vd`T3^F=fe}BYu z$@LC(I$sOToLUflg4Av@7ORii81Wnp;wV_cktfK{48>MV0^Sln{2USpiWWL}K3k3q zR#NlnzE&2`Sm4uQyH?_la4DeJ#iptTbuXeTr%++PHK;xiMtoj zHS7>y-j3trg(bBvgIZpoXcba0mD~+b*KZP)aex|8b-nG_Gkos*0GB?8XmZtR5AH+c z=9lbBa|!3q8xED!j0^i}b3!g0`DKsX1!)xT?W+1>EkXAwob&9d*sIR@L z$U4)DqoWU3yka_;o;gn`j7UA^9}UX9p@M9FJ;zwvC3n&Ix#?K@RFfH%!c!E<5oQOx z){y28Zb#i{VZ;4*Uvc@1MdAR~cqhx{Lvly63>trsYeV&wmp`O5UbV$xlPPhN6a;Q- zD&wz$Srrdpx}R}b-4JEIz!LO1ORdL%wznCw5Y^|ie?rE7e|M>kW28fVrqFf#LH5i3 zooxUC!D{!AB8&VU{!#3AmgA+W*c@CaPjc&M?#H?SFTJT**F}oVL*+~JlFRP>bpRiZ zy&iv_uPez+RwDL>0UL1!i90U5+2!_$S(FrkBV_%BoA(Mj#HdDl4(#qPxfQqrq{EVG z4$f;2f2EM=66n`q4eE`Akz2s%@Snt9vmWg~KhkM`%Y!mM?FOEJyx{ffQ6J<_%|rKO z`NB&=hILg%tBM1=;}w;Xrrh=+Wz`EDrmx5cY~LU8Up0Sxr`(J+5|xV`e2B~e?Ywnu zjYEBxX(fh&S2M_3EU6GcpnU79w)Q;N_sRe4KaCBUGvMJh=NU^$56v90rkLI5Qgb7Q zP90oj4_-bc%KOQj-)h1UHizJi-Y6|Y4L3EJu?bk4T9#j8pikmKK$6ZhpeJc@AKBj= zPzA?$RSHXGzw}p!WtWo^9(cM%>;v4!=k zt>#|{Fatt3%d&}QhT47>XBk4rwoZ0QUTzNMSvPk|eRvHlrgatLZfTZbNOpJNKhXG9yX1a^KDFZ4eMp{|?BU>y5F*S=&h$J&o*C~XJZvt=~L4FLmKXrf1((k zEFL+@UwAn$UCY|p+hyu%SlJ8FJc(*`rA7?b)Xhc(3 zR$$Lc`iYWoj^OoA=@~U9cPs*Si6iXZ%l!yydd7|8rkJU@@cOn_B=X7ry&D`-x%0TD zX5T_>|C5aE0zeNVN~k*W;5zAJQnaOY5^~ z@&~v7bfR(cIPbZ=R27qPV-<30(NoD-dtKz5y>aS&a0T@aMtaE#k&sT5zP(JtP9PmZ zWvaF2IlPb;vI`4e0bbDPkvz{zb+GX=`^>!LZs0}lgWPZ|gnz;{ezIK3>iNhd?PKIt z2WrF{3JS;%qnr##__)?EN|9;|ZZeI<8DOA+ZZ;>gLPy%PNoock7imDHfv)mII=M{_j zf^Z#+$1cj9dOGX&6Z2I3MUokdXq3Eo(Z8fqum93#VzexGtsGx zt-UEkpjAv1=y8KcXrNfsq1tEkXRoBU?pq)*8Rq!<`w~ZjnL-a)8=1g7)SM0kZe$Iu1 z?~tv;k4FO3Lu#MftYzZ80#>V 
zma5dfZKt5+*7SI`XA^a>-}ZsMSlBb7>eCf>fn=W2wH)q`U9s-&F9#{UgRy7)J`byZ zFnKq=K*5Xv&w|SVbR}RY##UF?8(}`z?^!52U%BQrL3==3;B}jI-`fkwY%X2l9-(b+ znU(kcIj(#0Jj1sW^nitF6a*J7&GN8p?0oILH`4xX?4=Uz&&ronnjG7F+Fsv#yLoAu zV_3@o4fr3EhQ9L2i(M{zUJbEkU$@ETgd*zu-3vU2QH2)w-ZFeM^_V3@kZFS?)R_u@(3ExO&Kfu9Km}8c1SyP z1eI78kkI@hX;wPMK7Rs{0FIxKg9(^eO!am@9iFg?8ykV$YSSZ zSJuU(oNv}YnR>QDi#AFt7elB!7k0LGJ36T4Dy<51mUpFm;iaO^?O z4S-e=Z-57c!3CXftR5H6v<#?K%$ta*Ip_*TN3S z`MaNE`;FvpUO^mIo$rYcyw`-KbJA{5x&~HmL(pKXhdAulu`T>CU9jMat@xsqZF@I1 z`7V7yDI~2YLYF(ymOc$ha2+Vv~wzg?vd#}q;D23Ukh7- zFZad`zxg(YJC#5n_Rau~3!@=wsY0XQzco(eGO_<}d+UH&nc%p9>jN)qRVP~)kGU1s z%6Dc%Fp-tktG{p!6&Sw;9qPw8&HQs~V68JxFVvdWYW>gtPZO3?+`0OeJhA33UG(@9`gQe)&l%Ok-!A` z-hb-JDxmJR%$(e=z%q-l2>4=lEZdCm+{|-c#hY@>oCCuOnSP6rn-sqC@26Cy>Trpd zTDw`ZYW`fT(s6$ynlsu#A0ioiNV#*564}1Q#SwVqyF#!32$1Guw28|F7u%UcYK^HE zBqJfPFrglYo^(F&GO%qhr?7!+h%vjT>oXU>JzRy8!)q@^@p^pqM+7cobKJ32us(pk64^$lPaOq{G<^|gD{}`Y=U=j$V6^-NQ;O4gZ5F>BTa@oNk;;1*YFFj znQc2_ArSD=i>IXNj#%4D9+RLUBa^4%zKGiabOJ0Dnhp#}zY~B#z+%|1#VybsfhTvwJ649*LPF|DK&on> z{vtL!NLI=B6b})G@*^%gJPbJq&Q188qzZ)4PM3Q0f*L}#o@7yRQ^@tM!(7M0%%(|* zG!=DRL;NJfqcdfZXJ0wnoNMYrP+C<`0}-%On%Pd_&Cc5yq`>bN287UR)UlpZqc5xI zijjB@rdDA8H_Mp+m@JQi+z9GXjnFnQH^Lhi3V+NfGy{|Ljh8L$kC7Yo5=j$>lR;(0 zJpWEkkbl;U%{koyHaNOZ7Jn<j|*oFTVKWtn7fI1Zv=gA|sCqR4dM%dUTxM+!Ud)UPh&|Y!GhVo3UVR z*=8R?JtHVi5d{Jtu=_Qb8SNb+cz1McTXESQ@4M16HZ-ip;|G5!=w}l&w4eWK=MZuY zPXzJ-t9axri~$YL6ld<358j%6ygQU?{}sk9S&2>JT`o07GSBVozR0Tlrc2949IMx5 z#ZG!neLrZ90IlO@HHnv|l=Ey9P?8N(q!e;~rxn{QD${C@7&;PTP!O^|Cq|9;OzS;5 zv*=NOkRH-@qrMgLWWu7c485iLU-DmFP2cS806w88cHW3THFF=$XG)aUZl-g6&38ni zW9oBPaX+_}fmJ7D|9ytG5W>ArTvh#AY&>V2^3A#jRIi`s4)CgAQmClzYRas#exB9!sf41FK34xz@2jPNdefy(hTLC#- zQcjn1edTH+xE4X);;2mX8X;!eu)MeG(a*uF@9Y!N4UpGF<>$U$ZB^j@-PgpCKU5Ok zB`lC;J1`OS+UW01jtgeMR&a{e5Wm&nftF;OJ*Ugn>e@%+XRI9HXJGbg65dp(@#wy* z*P#_~J2Ap!Q$6(Xs=LpN${FGO$YON=-wBrtKg!|IqDz`b2!_mD~p#STf&xETPGeNlLxaQfB75S?Ye-i)@Nj=^87drM#mQquig z?VRc}KQd?7Qpf0u&B;oXba~hH*oOMQU*291DIRWW2sB6C_iF)_)CyT3SDG!P=0^&k 
zvv^BqTZx5`UqN7yl2w|=UuqbY=o;$w2y8fehjBDVpOfO~xSQ|canYr+L z+lKtwsX;b|#n?0GlrVz()9^alLHMDNBOL!g5G{+r%Tokv27=jV_Ij8`4If%-%WE~dYBVU_Yr0oZIyYJ(h-usPT%Ej4N-FPOK z)l)t84#tg6SvO+sKo!Hq2(z)abwR5^SAdhxPh~r!p}j;;6Rk=hVbT{Sud&sl79O$( zvurKjy&HM(b<4~VyO0^8>n9p~3m{9d-exNpm0-D81G7P^JpO299a!|-**Z=&P`hE| zU4(Pc*2O}|uo%1-+T*$0_v&dIZcUAS_(nB$ftdC*J7gf_A$NnW%C>a_kmEKME*4Y> zEN%lPVg@tAUSi5uV;^6KONlD?g{)bgPx_49k9-h#F>y?5J13tFYeh{jzi*}?+zZMYlnz3~Otl6uA5;6TDv#&6*s84(e4JxLLM zq2$GYN3pQcXPL%09keiC1HpbYi1u!aN37=e^q%x6(VwCoT6+JrbR?~VybX*iJVHKu zMzrKeRw`y4wLs*92lr=^1(uU5qrpIW9H14Bg8TGuPaCoz`W`G*7?=iTxi6-ugTg17 z!|HTLXm$a)y(RHpI0j^L-1G=6v-kqaun^q=43UoI3cEk_#_Dg|LmG~q#8k7O?E6+s8Jz!-7d>2_Dv`~dODZ0ja`wdy}T+!{Naw7fmP2=A(CRzu+kV$@=pr?!E)^QtP%d3-`t;6 zEAtwRY0R!`9IxdpRq>6I; zAII-T=bov)%?GUwwznx9d}$I3tCmn5**G^u8!GJgLe1A_>DYevj*%x*bmGHC`flHV(Xn?n&jQSm z-PFO)nsBcHh4XD-U?(21hk{@RJ=%#2_}KmEKK}Frvs-_IZpkeITXBBk`C(xf)WNm4 zFXo@MOX2YaM$T^T0Sw;=ha1)`r}%PDTR9j0ryLaQ=rL7v71#pMxgUL!jD;9Au&4ROG=iC<%$j!WL_yG8umSwd0>mteOV z$P;K5u;xH7m2~}#@GS2R5pOi%K`DP&pa3So&`3&>?*;7Zaty7i_G1;O?I|O$z&OYg zm9m9V6JhQyf~kx7Tt2ky7djsvhqU6^Rfq3%KTC@=WMCdz5lo978p_~taWF_(WeA&WRBe9P^-9tj|cs#+~4H!E6gM{FbbM@ z4O~cvnNc&IwZXEF6%a@v22pJGwnNJ~|C9T4wbNJGFTQ9=GNw0)QO(PD}XJN8l zlHPgW2sPTVC+f#}qJ9(9+3mgE0&%M6x1!FlxL$jJR$!Y3k=adtNs2D&QaU+!58uK^ zUOE+aiKl94m_By>MWjEuUpB<2BZtqfxi!<0M{~R;P?zf{wLfWr%=tI@MbGBLgBMj#ds4MrMg5Tx5!uod+1Svu{=6Cq04ANFfB>Kbi)JRbPVXT00jk;f z`(DnqmZ*HSihm_w4j|6lIk8Yq^?mX+^AHvH8u}&x+5vvyNR;dUtUJ+sN<%E`{jk9MAQfVulCM6s;RDP z@Bu`upaOz4u_7W(>D>k@O_3smrbs9B4gr)0D^*I6ZlOtulmrMNU;&igLyyuSLI}Mx z=LTP&_nS4d*7wbte`dbt{J|o*$-O80p0m$B`|kbQpAL!#{yf;Kp>qiehu5XrL*cb6 z(1W9_grA!Pkj@X~k6%|*#QjneA0OsGxFRc-M@yr}Yuto3Enhy$^`(Y%f4q;Za@I(a zIT5h9@Fj}L$*-^Pyi_B|y@#HIoM4*Mxh;ag9GFY51p(2}Hrp!L`=;GOIWVu@?$J^l z&Mn<6znZ!S7CT~VG9?u9OXo*^LJ+)=EOK`Ua5BCpjDJ9GfOI=R4s_oJ>0NnaCkj15 z{#{c9)A0{8s9q2P2MmMGqU1Pa5;PuczZ4tNa-$PI5#*OPq#yRB)Q45ZfhQr-@o~MQ zIz`5(_fMp1~Qg@l|_I?Y7pbN$&Wp@haL7>>i ze3-$qeYHAWg*-=~Bovb0@4G(mWgkYE#Q7`-EAIHdZ9cS%M81JWN*u3zcs)6BVv3sZ 
zV#v!rbPQ%=m0o}mY`7bw^-odeZz z89Q#7cBQ)ta{j%Mz8vf`NftCmJ@(u`ZgvzvL1b0y!8R`ix#oM6GgseNSWqnfaj?%r zlecjosC$vA;W;M?ZGOI5ci-EACHLaL*u*rZ{V+V*8cm^zhhP}M{WzfR2e;S6YrQFB zN%gtxKb@d1eN0attciufH%}RI04s?u&uVkcgE~i%*iA1{zPg{l$7E6o0*^7{>9GDY zD1&YFqE`2>_nB&+UyAs!g6Rka>{yk6^wy9@KL49}ZVWykm3F9fIQ%6D+D05H&e7l3 zkq(kbbYw&k6ePvZq7PBh_&H#*@IBjG&GgX=z6xSm@6EU{B<&zSuQ^fHMvC=uRnZYuc9tI@#m*J>%QviNnlUvC7SZyO|O zH0Dje5RVXkvB;z=ox&B=J2eBcn3K2qm_w65`sf9K_&aH&Y1gv;vb`ysoL^Q<>el6d z>(;pq)?rb}k%Qf@q{&}b1_xLy&79s#d|3KCKXzbc(TY27e^raFJnrKioo-=9((YIE zE*M=9uiXpcHfbr4nrb6b!A#qTn<{yd>o|${j`OIRZy8U@K%mg1lr5Rp!ms~Xun)A5 zY&3=#9I*Y%v3Fd7i3+0oLZ2*ILUBqfLCoo;A4@ux!q(z+6mCLi<#N0zDvN^}+1BpFx2Wdl6*PJj%Ua&z1^$i4k=K2-YOwnY05 z*QkS@y}+3COc=qq%-6HQYy#;LA47Tjd=L5{(+i3PGaANF)TPSEB*n4vM>b5;CV0*i**d2O`UX?~d$UNA7a1C(t{_g86vriVxU+4Hc*{uPkaO z2U@y&0?_0fWl26^d0Re{zpuwSl@}wGaJlmJC2OlN@9T(ap_`h#l?Hht6k$BdKZu*+RB^bH{C48X0SC0z%t zP0eKG+j5$V9g(d1=uj|P3Tp{xiqr;qdqqKgjl{r$A)@*lytaw6WmlGTLYv#>7nDdi z@J}fHKtSi~Cs9%RS;9`9Z|uGzK% z&T|gG103H+6=$=f&%Lt+^dRwGzzA3ak4-{_277n4ehYsh$G-pb;I7_Uu^;i(YmuK;22)kem@rhIsRpqC@aSS0-LQ3R9=psc{6qXe+BUrt4Y7pOge8NG!0l4V@nTA2 zjf%*sz9hx*1&0_1E;ooCrz*??$U4n#0q4qdRkI5yAR`7~B5ohbzMtI_ESimUlt_;& z2PONS^}oG&>f-%kNYiEO+?#2LXnX-|Bal6r7uz8cPaim00Dt81+}9SkBit*Ist$(^ z(Pc%ruof0zY~5w5ZWJ?B9SQ?XR+tf-ORwB)Cx$r^9#PP*}S!-pJ(>A#+dW`(YBP02bB}bn5V%Bntb~n^27U3J$^|#EBPS6quaiDz+(Knd<}>b;9%OM7GG9tXU@<|m;*-5R(~C- z23Vuqi!mu~X=HMlXpRATZ<>-$c?-z;RWf&LvuQNKdVd-#?!~!7JJ&u*Gn1IGU%XR( z!UNKnp)iRKlr+e~7sf~&GueMzIXl+9p?QWmD9Q#RJ`d2{PA{-1d3H0JbVSA8s4~E0 zV`+7?m&(b`N-8xeE{WKi=AvT{@IVkCu)p*uE{CK45Tan!*?iydF|++)1^nHf|4qJ;hsiBU5yUyzpLlQ#m<-KE)HX|qVsVZfQ8_qa6eMTk_j8id zZ=MQr)Btnr>RVnGqrUBp<@wLvFvd6|idM?zbDDKGsPhHn+pg|+;&czqr($5$F748l ztYVQ&$Bj-dTp7Sj;bM$t+V=EG)JaK_^j^_>XI~aW+@4XSHWbzS zg;)P2vtq(j?63z7z-nP#0Ht-$Ys2I5N!UAw1_qQGEgja_0?Spod7bWd2w;tQ4<~vZ zNL$RE@fFmsSRm28v(3pP9YP@XCA~4+0HDWe50G0nDx_ay>Nmp&>|!-xV76`(md0er zcLO<=6-y<$@{yL&eG2%60tf%;%S;+ME1Y15GyLq!ATOzWoK#rLpi1f`ZHwT_HwU!r 
z1oF1wVcaN2#WZxsmDn2VD;0()U(KZM!|mHPmEPOy%uV6f?ONGfG9hC^N7lZMGPY-bnUTArtSr+Jl6q6C6xvswW#7w<>X zNBF#s%?to~+8TQemlRP;N5i}sT4R_RuU_SQcbS_#*5=LIs9bej02F!c(Cwt38`$ha z2QZd?UkkGWuUwjyI1y-kfFpzo`=yLPXJrCpxD=ns(mTz3 z^fIA;`$yY^QN`IT4gQE$R}c-q;NZsas^f|WpsMYHm^TE#ym@yJn5~OUMwu{G_4QYb z;$w%WORHwV0-|?pXRB6oL`4Yf*GWp1_;HSWZu+Uq{7&;*>)nYDHl@DUpW*%>99VP# zySXJGRkpQU&U*|%ZMhdES_tpO2EmFcJ~bM7N~@L7wae&Cj6w6V-*+flcz;zK=0X;vM?uoD*Mk6(@yNLwP2CjZez*Eoq>;G ze?^N#uKHw0EjC57!IHgZ?Y0C@4o=4e$yO}z_ZNY!&Vu<>6xOu}jHR!mW@1NPjNLc+jJwA7@C}|~miB3{Ih#W_yrkcDtqtAY7T&5PUtazgKx_3q*GC9eC>5PcWR-2MBk&)pfW$?MTW%= z_6Zu!7QdvmLb3VmbSejJzcV}m^qTHmi4UGRc_q(%?=80Nr%vVAGh|gRl8Mz$fkg`T zwG2TDoabV^hyyID)|o1NAO@%9i=>$$TX9OqmC^q8&|LMrdfc_aVh@oBaR9`bu9x(q z@6hRbMHD{+djcLIbKd6C>l3GHiudBmr5|2S%Wpyrt&RuqCg*Gk1g#mCFO8-w4_<=M zeN3>Cn}RXRzh1-|3JrcykE=doz$w5O>18YN10>+H7spwIr}DPsf(w2JVu`)tW5H*5 z2-fkfbZ@~70B@O}10Hxf5bGCTU|}JVuAk5LnasWG)+2nZP(Up5Ag7|>fyA2jEui}g zAk;#T0)Qo4WmZ13BtNl}=L#VeU?ZfYqOfNUXF!hiqo_y;#oyR4km*s=%^fcG`RKV~ z5TheLwpDM@uq^uQo;*%D6Uwm8OyBs>Gk&|Ap%gfR;-D1cAruI3@-A7^wMIn25_oMIsnx@{#LNy>m6Dfu&dw zm;>Cf&&5eHCwh;=*`}2Zc`PbjhO(YUO7n=-P8g-x0ZaZYsHnjEPWl5_7}5`8vR~VO z)X&#}4^q0^8O2;}6%fAL6|;Tg=bzjMw8(^Nuy(Qm*Nw!veCvRUlVc6*eSZjW=P@9k zW}nI+(74?a3G02<4?hjRB3}8nY^LN3)(ZD&7viF^=PnxPAwY-IX`MIY8r*Lh^;ClI z65}r&Ayd*}Oz_P&Z?;z{yL1jMRd26qN~L(7qbLJ7rmEaxXl2vq!))y&WAC!4 z$dPc5?CxB3Hobix-baN$_5o!|K;&Lm0bp$S|ET1T;+h9P!zRyT06>ifTFv$aeFBOE zEf_+DAR}PUHWaJc!rb*j`VfdB#_60g_1(@hRJ;{@lmgHH`A92eo_0Mqqtte5s{J~AoL7UmGH!WVfgOw2UzvG?fO z?s=VI1G_DHQ`DeePPu8_V{a}{YbXfs2Uxs?H78%3Vl)-m{`@%zUns&J0Z!g7o%>3p z4<`{w-}nxJFmP%&v6Yy%;-X5e%h=|XZ{`A1V6P<038!wijN5!44KaD2=v}kB?&CYY zEv}=84-v+geLwYHJ@z5%S0*Snm*!!^5!lIZBDJ9f#jU9-p*x! 
zs8y*jneV(V4u~d@gD%)#{hiWy(qajXI|)Wo-h~jowz$XVv#ElN8aOW{en8!14)VQq z0*_4%th83dNe+c%`YE9YIx?{wDRrM~p^*;AUL6j}^<53&g5pa{MZN)-9dEI>q;~8p zE`?BbA=KVpHU0Q9#{t~RW_`}^D2gcYK1@i_deW!ezW1$lpD!6vs7|N&;vD0wsgEUs zvb@!c!u$ZEF9enF5+W-@!Qhvb03}nrwi!e<2N+%#2M@XdpBJruP}f2Vd)vcFS`t@6 z>G71$9e35HQ%oBxgbnUjevY+9S&^Ha;j>Q`lDPXWM;8+i-3ZlY%VsIZD+dhrT|&!G z!o`>5t*Wk!}_qF_yt_M zJ>Mm(mnnIQy}cTgjMewc-D`{7z9PihH4k!OO}T-YNuuOc!djhB4?w4?fw_^KmYX~}Xpxu{DGtzo{QzGupo2<#?i6@8g#ttma`#sQTKI(GW>FNZSf8_or zf=pj;r(>K4T!_`(PDxpUEZ$)%e8nHxbk!S;(w&ViVw<~rJA6^OX4`g0_tdF=T{0%L zxuq(EJkp8Gb_#|{2t;b<=zCO)3p9lHIhL8OoKXW$jH415@~ z_1V@*?0~em2B>?(?FlCPD&Ft@AdupJJ}nD8o_zoqwo;jFN;8NS22MUVE-(gQLB==3 zR4XiYbIXrCYh?9(1JbVMK0#>aj^Lg89Fg+rvoCL7cRS+yhTRaAkVu2sa{Pwbq6%m0 z_kLz?JS+Lv-inqFx$2GIY2S_h{arZpTTf7wPk_)BVsH(sFCAH@h=z1tq*Z?cpxi-{ z|Jno`Co5T61{26Ph^;tX(KHAy}kUq3W8r4gV|7#ZN5 z;PE>L)>9q`;@ff#a`pf@-0xp#tcK20bT}lG0HQsq5Ll)!s4iE1kw!u00#q*OljmY2 z5@sWpj~|C{bDt(4qIcn)$n21P-5|CWNh1hb(JK1LiUG`fnu}byK2yibWj~8N(~7e} z5AI>kOP)4~3VYFUOV1A=js+j?(yhfw;C9%|5KB6UhjV?X`Ed;>nou|Ki|yOH06d9% z98_Ki6M3X?E=l729KbACqe6}M9Zny9t$I0s>em5t0G9q2tQnHxNL8uzwy;g;QSd*smNuAb4NjLE0t%-cqt)eN4n$mXC7#HEK-XT;ih(;!N7Tdc*}pU!<0IT!)mSsN=zh>;8kbZ&TFD(*}c z^sEs)H>02WzFr~TR%!%xEL^wpbl0_%PlA2Nz^5u5dm?aiy@2>xDc*D0Fx96X*}hOG z_*oz-H^0E(?1C^`ihNbbtz#O@9kg$H? 
zTaC(Kz*FZ#9WOm>&1t96-Z_nqeb?7@W!{e3&i#%p;c8#JD~Uvm^hS}Vg3Wzs1zk!G z@b1!zRprMP>in=IDJ#7lcYPxs*}~tIPTZk;1KuM{#gQfx>TE119Vh!~0vNwwm$%2} zG-Y`WkQpJ+Jso62TK55d1Ruoi#$sNutpLDzNew_fs&z9aJoBK9@e$e>A8F86t~|wU z`TB|+x#TrlN1Pa6?4!70@B!$fGq6WRW zb&9oJ3ivShe|j%0sSB9bhkS1DD+f_f<->qXxrcun8t|~j9K=w#p9d&MBLRx?IC4x+ zThPO7Omj^APe?i3JFbF;VDfka zKX3t~n+|JGq9i3SKJXmyxwS;KQ9pr#K@O;Wrw?kVh#D_T1~3AGSV8pK37Ds%AXI9HwHND9R0=J_jF3r7ks#;^22U&T(2ZY$)W%Lz)!0!Yv{!bt`fiLV>_cfK*#|GNWde_c4 zfpUk_hfY!d7uB@dRo2_ixA}QZH!;=b$p7L-|B}hGq6|QzI4>Z;x{?MWb0loNyrT`x z5P?+cbDHRbPC|tTf*#+zpF)~;xq??%{Hyz8^-9`35UL(5uwm#w`}JK>gkYTHdeTe< zu=qifr7c5@b5r5%Z$XtVyRNM3ZZ{Dg94YeBVB?LveE}*TgJLd|u`dE^B4|*J9~f-k z+r#fHvQythC`f?vc8<3d1VfiW?XP`pP+c~d3SBXn29bW`!Qyf;(`)zDO1D1EII-8f ze2`*xA5Zjf=WM54U1f`=rvEGD7opR`MqaB7Zl_lGg+-Asu*hs#P5<>*ZnA2~*H76U zZMxz=ujW3x7WM3!qurJEBNvSK9mEVg?l@Hb;dxAAmBh(3<4B`ZvXLwq7|n(=G8c<) zx!5GpAGBk8_buzB%xBHj$oeBHC&(sdOT9>eP^3c2K(dLlo9Easia^+dJg?@a2C|~i zC$nZSA53O`gd9J#z~=AJ$Rxy$*sNySI-OiHlJ{I%chY}8Ve)FAGjb??u_X5>tzags zrE?cpT3SY3JHAjquptJVH}#$m>y7$v{*n@)mHvXIB|?R@(_S9j94yzYO}xhbHg6ff zezBc;wsm@3zN`H7!Q>)^)+Aa(Jfn^se2U-~v{XrQ^9i617rM$d_^e-4JTs$|0PS<=D@-WN>!6W7+60MK?BtaLEg z7zb1r*S(#g0AN5T|r zHgdPOrPrEK^0i3@9_u*wX*~m;!0BoEwyvjM8f%i}mGg@d$qcNN&X=rc>*lXy zzXwAViBlL!Kjxw+Qhm8pKgwLmc4M_{tm61Bft-~~N&}l|?2R+!tAvtCb$FYG#!~r- zAxUtDV{SsyYv6vHXXi%fJX;q$WqTB@xg@*Dd4`E^S?Qx~owJB`mi=gN7%^~ij8HuJ zM%Z?|#4BBY^tRQa1eIh-F*5ZVF-M#wHjA4MlfMyP+eV_y;KLqseiEb0j?_)K=TAlq zndH%jdq>qQ5_6&+XgWRQb2;|f<^~%(TKadtWLpw#Bc%*a>>5nr%Wi~(9+;5n;Ad$@TZA4J3@d2LaeWw6LvV02{ zfxcPPIW(l11>8`8!FsQcXe#N| zs0+5MiyltU)cL^33x3u9)~Wky%nf|~D?JNaECClV4|=K%wAQ>i6BgRkk(lSMm-JYSjgnt+94;9d?{Uy{2oUkP zVoKetM|-t@?a7UoNgP^_%%mUlD2o36=D`4~4-%-a*9=AX1P?SM`#aez2IyI-Kr(TJ zYD=4&9zp0O&gV(p;cGZ9J5~2cKVT#>Pst>o?{VY93WrOwN9rRskW=k5t~POK#hwq< zH9?|J{c@CW1luZQo*&RoJw$ zlbAdlw}IrUB=F9W`*6tG0?MY+{eunGi+)-JCKERg`}@u=Qxm$ZlE7@hJ%V&{Jn^JKCy zpw`v~v)M9-W5rz@m!oT&&MK?r2;ATDed~;NJkugP2oGG_*utuP1;b+cEYLZyu?ikU zo>}?XJDOl5_&9yZ4~8IBPYflCxc02MNgab`6OqE@6*I 
zx-Y~@?vK=69-9KvV@Vqm!+*Tw!A8yZ%#l$)LzfpR9-!Q%3OiI~9u`1;LPoP8 z^y269u(6a%Qy|5v^pKVd7cauWkR7Hk8CvUnJCxABu_=)FUix)Skki?_TXS5szCT`7 z&LrdrM6Bv>^c0j@77~x-QCAxz^mlzo=RB;5CTfNv1t}sBWFn*iA_X9K}-YR5a z6&8hk584yPDwg|ScOm^7TO&p}-}jvFt=a2b^jwvTf$&-@R){b*bS=Ba{;0qR`@48g zTjQw2;ZLt?UZ`N$DDUoSE?(IEwYrmbdWZX9_5C3OG&qRI{d8?n2GR|to_zwxZ>V_W z5^ODv;LpYiHO+e4+K4k-O~k|i|JJvItn^VlQF-v~><$qot&89Ku*lk0?~iK3} z#YC9IS1BCAcBBs^qmGr=@)3T*(2WUN%_Cs3e0$Jq-|ARr`oic6XoA$E2{Pf(zb2x4 zDo>QKBIT?0%H&G6@b{sgbltWpte<-DSNFpsm2K+9Vd+n)*688uwU z(>SV^bt$Yn&cxrcJ;JFEPqpdps$;8&Hjz#gX1>P@f63%zX0Yz2Sd~*LQ0?sKr4mYf z;VRh&~5U+xpKXt(KmCJa#QE*T~!QN=H`f+98^hM@u3Dwzomv5Ne*IHzBh^cvZ;#+VtTWm zbv~e}x(7CWDRDAI;FNYtF3JAoa-%&OH#9Lm(h1d9*K3Oge{F7tWB<%&*O=wi=7AL0 zi@AOMxCbp#U~1S%jLZe3`QB$#ExA)RJPLgWN#je$g9XB{zn1_nk9y8mJ|9 zY!PI5MQhzywR97td9?2eCW zzIWzxZ!Zr~GSSl@CQ~kNZDMV>YrtwzFLX6ZU|h1s8`w;O1DKx#HcMb8CC}mmjN%8G zS_EI%+mmQky%4B|!r4#UJ z#hrVw1MUiGZtMcyMh0L@q5C?PR=-#!+x1y%-TCUCZ)7%WoQ}8}0%bzYw3TvgFi4$? z5g4{qM~U2IT4SbH#}04xO(9FLb4za~ zl||QR7NO=&2M_UU=jN>5fp$CQDTO<=mZ6Tr=#yN#$~eLVz!0*qPTq{`XfNWYgC&y} zi2<=SHL9$%4>i8tIScIt9keU>-43bzrw$xF3#P06^hR&+B>v3!`-=+Y%L0>e+Wi0z zC~COXv``(0laI~raXY5u<>Nb@5U63fHsG{vX(ZN->P=^DTAuAgt8b!6QFWe8256<9 z7M9y^IQ1uHGL~R)w|s->RhiIahC3bmbJBV#gJpX$Pr!d;P92qrMhGsCTNRGK+D;w~ zbh0$sSlFCg;Na7yb^cKGUtkLUZhQElNwm{~V9?zB-Tto}02VOR1hoHC!uqd^_y6H< zm_sH2u8Sq5H`>>)S;Nvhoq~!E*eEa|sax7D86Q2clzz#yJGQRll0sx}HfxiY$RP21 z%%&5<+*}OP?v?6)UU+6&)6$zdYNjzqB@|m&myOH!xXDg;R!qi7q}0W1BWX8Q<^A0z zx#hR6+eAm8%ZGA{NUwZ-kDi95xm9fSqT9N=ZD@{wG+7~gn)e#IU;IJPGV{C9)7H41 z7yt3V1r1=55q+E4QR#z&(wP_WZOiowzXns2W~uXWNzo#P0e8;d%~U`gSHKAHa&l@+ zx9$D2H;$cN;P6bIE%=a3Ic|o$4rr#*u|KO(>uv4LzZ8ECkBgeT5~(cipHKc(b|AG} zAuOfrbnIlY!E#2*Bf^e*;|^_)UfqQ@F?ktN7ip0PPf&Gs(&V1Gmsr;!*lbxnh(R30 z%Kh|z+Ega9Xcpji9zM^HHX1JnHn81g<72KcqQp19zE=yY&9Q!jXhjt80+`5cERl6y z9|Z*tP!iT$@YkoK|y41}-!$Dmt2;y=8HnPd`L=G{Utb zkiVB>y=N;`U`#c9l(|^8+m+y!89I-g$=ptihbuC?y+Ipt4|e=4$nB?oN8E;cDwJz92w7@ zSvl)X8}C6jkgl6}AltQ86JC2{w2rJ%AD;$Jt!g;l7iiAES#Q9n8H2X={;lU#;LyEG 
zt49vE2BI0S#f+z{AJ?#SA&d&m`){uT%sYn}Rtn=_{mZWdXGH5yFU{ID5z)R=j{L&# z+|^-ltx+c&8(k5)fc25HBOKH5&sJ>07+_ycilj+-NAluc8 zAC1xl+mP@wInwyjv~~$n$KCKr2zpxE8fmsu&{k!g*13~>eTx{!n)0w9EF5x5-TKbjG3VRf z2tR%kbs^9i_Y)`7?xQ{^N0ku}uV7#Z#;_MOZ>^LW<;EUQ{<@lyq@_&aFq={@LC$S0 zgorw7qRf0<)~dpY>GLR&Y(*Q*=2;ct&+-^b?WCsYrdaaY%L?E&2d&Pv&T!mRA_Xkx z;mICLa2@UD$Gc$e`9!mIJuYr;P1=VftSyCU|3H6s9on&Zp#N21(DvYW`{w{8i2o)c z;l1zgukd??bH4Vf#S!;OJ3X?65_E8Ep~OxDFNr z6*W$1e5s7jm2*LYGxvp5h}`4t=;JWCzV0kiC-g3?{V*7AEr*{xeR2fWQl_f9kq@>N zz{m^-Yidwy7%t)ZP+xbuY}O zQi0tmZCjeA1%i5LLZ;45^+rX#*##@s4yQ8Xw-WSddkUDqW$3B~-`agu)l`qb4rzzd z_9}<2(%$9-o!LZhY=0BXFc~VnoY6k$?n@6#sIQb};pNe| zs>Zw{!(LjqiL7IERU>oP@TZ>cZf|Lcq8ULde$cJW3Vje?GTYJKmZ+tCO4!)jjy`DB z%H93_(I2DilvSEIRb-*A>uNF2&&d%sWJY6Bip_t1E;j%4*?lTIGySfwFQiAGt7+2* zIZ8`O#nsf-wslX~OfQ=Wf0cKAzXbM zH&G|QfA2p_$Upb|_fYu9(AW{gKMH^*hkrD~Kbqkm&G66Wi#7)S(TV@)M4BD`k52qY zC;p=o|DVu_8-ada(cIOCcbl)|VCf64ybotPDcZ<9s`khv&I)BIL+8B9QN^=|F_nL8 zPZEDogGl&aRpo5*!w*|;bQeC4uPfeu%)8xKs_}SJ;blrf!c5A3{MtU?LZ<&W52WtD zKYiN=ZP9<@lic0L|q_ zdo$iR!N_3<%`#`x1A=0HH?b(k)$eJOxN4D;!Zd#&?R_z9Ok3KvgH+|$S(kr(5(bO* z?l6-I#&?(E5B+(wYS-X_HL_J?3DRMj|IfQYtmKHC={S^YrK6`bFMKDE7&NQ z{w21eHLe~VNJ0Jmi+BVo{izGRO{jsrKAV7dQ|YTa> zw-mxBJ3|sq49=_%haxl^6BAg+|43=paWQ)K0rkQuyyEsz%02n{QR5ibQx9a|I`baq zTUE~^1j9@J%nx&a!Rs^1>d&j$Mt!bDTW|)i*hr>&{I!g&qzexUZ%b^d8HS{D^Saix J{43^<{|8OiKra9Q literal 0 HcmV?d00001 diff --git a/docs/core_docs/vercel_build.sh b/docs/core_docs/vercel_build.sh new file mode 100644 index 000000000000..ab64d74bc9dd --- /dev/null +++ b/docs/core_docs/vercel_build.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +yum -y update +# yum install gcc bzip2-devel libffi-devel zlib-devel wget tar gzip -y +yum install wget tar -y +# amazon-linux-extras install python3.8 -y + +# install quarto +wget -q https://github.com/quarto-dev/quarto-cli/releases/download/v1.3.450/quarto-1.3.450-linux-amd64.tar.gz +tar -xzf quarto-1.3.450-linux-amd64.tar.gz +export PATH=$PATH:$(pwd)/quarto-1.3.450/bin/ + +quarto render docs/ \ No newline at end 
of file