From 7cc200766ef42ffd3d99698a49451aef9985ee68 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Kacper=20=C5=81ukawski?=
Date: Fri, 9 Jun 2023 17:56:32 +0200
Subject: [PATCH 01/46] Expose full params in Qdrant (#5947)

# Expose full params in Qdrant

There have been many questions about supporting additional parameters in the
Qdrant integration. Qdrant supports many vector search optimizations that were
previously impossible to use through the LangChain wrapper. These include:

1. The ability to manipulate collection params while using `Qdrant.from_texts`.
   The PR allows setting things such as quantization, HNSW config, optimizers
   config, etc. That makes it consistent with the raw `QdrantClient`.
2. Extended options while searching, including HNSW options, exact search,
   score threshold filtering, and read consistency in distributed mode.

After merging that PR, #4858 might also be closed.

## Who can review?

VectorStores / Retrievers / Memory
@dev2049 @hwchase17
---
 langchain/vectorstores/qdrant.py | 116 ++++++++++++-
 poetry.lock                      | 286 +++++++++++++++++--------------
 2 files changed, 270 insertions(+), 132 deletions(-)

diff --git a/langchain/vectorstores/qdrant.py b/langchain/vectorstores/qdrant.py
index 3114e76b6ef6d..9acc99fa3297f 100644
--- a/langchain/vectorstores/qdrant.py
+++ b/langchain/vectorstores/qdrant.py
@@ -166,6 +166,10 @@ def similarity_search(
         query: str,
         k: int = 4,
         filter: Optional[MetadataFilter] = None,
+        search_params: Optional[common_types.SearchParams] = None,
+        offset: int = 0,
+        score_threshold: Optional[float] = None,
+        consistency: Optional[common_types.ReadConsistency] = None,
         **kwargs: Any,
     ) -> List[Document]:
         """Return docs most similar to query.
@@ -174,11 +178,42 @@
             query: Text to look up documents similar to.
             k: Number of Documents to return. Defaults to 4.
             filter: Filter by metadata. Defaults to None.
+            search_params: Additional search params
+            offset:
+                Offset of the first result to return.
+                May be used to paginate results.
+                Note: large offset values may cause performance issues.
+            score_threshold:
+                Defines a minimal score threshold for the result.
+                If defined, less similar results will not be returned.
+                The score of a returned result might be higher or lower than the
+                threshold, depending on the Distance function used.
+                E.g. for cosine similarity only higher scores will be returned.
+            consistency:
+                Read consistency of the search. Defines how many replicas should be
+                queried before returning the result.
+                Values:
+                - int - number of replicas to query; values should be present in
+                  all queried replicas
+                - 'majority' - query all replicas, but return values present in the
+                  majority of replicas
+                - 'quorum' - query the majority of replicas, return values present
+                  in all of them
+                - 'all' - query all replicas, and return values present in all
+                  replicas

         Returns:
             List of Documents most similar to the query.
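+
+        Example:
+            A minimal sketch of the new search options (assuming ``qdrant`` is
+            an already-initialized ``Qdrant`` vector store; the parameter
+            values below are illustrative only):
+
+            .. code-block:: python
+
+                docs = qdrant.similarity_search(
+                    "What did the president say about the economy?",
+                    k=5,
+                    offset=0,
+                    score_threshold=0.8,
+                    consistency="majority",
+                )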
""" - results = self.similarity_search_with_score(query, k, filter=filter) + results = self.similarity_search_with_score( + query, + k, + filter=filter, + search_params=search_params, + offset=offset, + score_threshold=score_threshold, + consistency=consistency, + **kwargs, + ) return list(map(itemgetter(0), results)) def similarity_search_with_score( @@ -186,6 +221,10 @@ def similarity_search_with_score( query: str, k: int = 4, filter: Optional[MetadataFilter] = None, + search_params: Optional[common_types.SearchParams] = None, + offset: int = 0, + score_threshold: Optional[float] = None, + consistency: Optional[common_types.ReadConsistency] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. @@ -194,6 +233,28 @@ def similarity_search_with_score( query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. filter: Filter by metadata. Defaults to None. + search_params: Additional search params + offset: + Offset of the first result to return. + May be used to paginate results. + Note: large offset values may cause performance issues. + score_threshold: + Define a minimal score threshold for the result. + If defined, less similar results will not be returned. + Score of the returned result might be higher or smaller than the + threshold depending on the Distance function used. + E.g. for cosine similarity only higher scores will be returned. + consistency: + Read consistency of the search. Defines how many replicas should be + queried before returning the result. + Values: + - int - number of replicas to query, values should present in all + queried replicas + - 'majority' - query all replicas, but return values present in the + majority of replicas + - 'quorum' - query the majority of replicas, return values present in + all of them + - 'all' - query all replicas, and return values present in all replicas Returns: List of documents most similar to the query text and cosine @@ -215,8 +276,14 @@ def similarity_search_with_score( collection_name=self.collection_name, query_vector=self._embed_query(query), query_filter=qdrant_filter, - with_payload=True, + search_params=search_params, limit=k, + offset=offset, + with_payload=True, + with_vectors=False, # Langchain does not expect vectors to be returned + score_threshold=score_threshold, + consistency=consistency, + **kwargs, ) return [ ( @@ -318,6 +385,15 @@ def from_texts( content_payload_key: str = CONTENT_KEY, metadata_payload_key: str = METADATA_KEY, batch_size: int = 64, + shard_number: Optional[int] = None, + replication_factor: Optional[int] = None, + write_consistency_factor: Optional[int] = None, + on_disk_payload: Optional[bool] = None, + hnsw_config: Optional[common_types.HnswConfigDiff] = None, + optimizers_config: Optional[common_types.OptimizersConfigDiff] = None, + wal_config: Optional[common_types.WalConfigDiff] = None, + quantization_config: Optional[common_types.QuantizationConfig] = None, + init_from: Optional[common_types.InitFrom] = None, **kwargs: Any, ) -> Qdrant: """Construct Qdrant wrapper from a list of texts. @@ -373,6 +449,32 @@ def from_texts( batch_size: How many vectors upload per-request. Default: 64 + shard_number: Number of shards in collection. Default is 1, minimum is 1. + replication_factor: + Replication factor for collection. Default is 1, minimum is 1. + Defines how many copies of each shard will be created. + Have effect only in distributed mode. + write_consistency_factor: + Write consistency factor for collection. 
@@ -430,6 +532,16 @@
                 size=vector_size,
                 distance=rest.Distance[distance_func],
             ),
+            shard_number=shard_number,
+            replication_factor=replication_factor,
+            write_consistency_factor=write_consistency_factor,
+            on_disk_payload=on_disk_payload,
+            hnsw_config=hnsw_config,
+            optimizers_config=optimizers_config,
+            wal_config=wal_config,
+            quantization_config=quantization_config,
+            init_from=init_from,
+            timeout=timeout,  # type: ignore[arg-type]
         )
         texts_iterator = iter(texts)

diff --git a/poetry.lock b/poetry.lock
index be30d063aff71..96d1c500aa513 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand.
+# This file is automatically @generated by Poetry and should not be changed by hand.
[[package]] name = "absl-py" @@ -1709,13 +1709,13 @@ files = [ [[package]] name = "deeplake" -version = "3.6.0" +version = "3.6.1" description = "Activeloop Deep Lake" category = "main" optional = false python-versions = "*" files = [ - {file = "deeplake-3.6.0.tar.gz", hash = "sha256:bf502ed4fcd19624e750c649b8dd2fb892529a29384c8a816bbb09005b763db1"}, + {file = "deeplake-3.6.1.tar.gz", hash = "sha256:78b0280e3e21c6731a96a9a2519a24e767df708c309e934ab473dfbc17b13581"}, ] [package.dependencies] @@ -1732,12 +1732,12 @@ pyjwt = "*" tqdm = "*" [package.extras] -all = ["IPython", "av (>=8.1.0)", "azure-cli", "azure-identity", "azure-storage-blob", "flask", "google-api-python-client (>=2.31.0,<2.32.0)", "google-auth (>=2.0.1,<2.1.0)", "google-auth-oauthlib (>=0.4.5,<0.5.0)", "google-cloud-storage (>=1.42.0,<1.43.0)", "laspy", "libdeeplake (==0.0.55)", "nibabel", "oauth2client (>=4.1.3,<4.2.0)", "pydicom"] +all = ["IPython", "av (>=8.1.0)", "azure-cli", "azure-identity", "azure-storage-blob", "flask", "google-api-python-client (>=2.31.0,<2.32.0)", "google-auth (>=2.0.1,<2.1.0)", "google-auth-oauthlib (>=0.4.5,<0.5.0)", "google-cloud-storage (>=1.42.0,<1.43.0)", "laspy", "libdeeplake (==0.0.56)", "nibabel", "oauth2client (>=4.1.3,<4.2.0)", "pydicom"] audio = ["av (>=8.1.0)"] av = ["av (>=8.1.0)"] azure = ["azure-cli", "azure-identity", "azure-storage-blob"] dicom = ["nibabel", "pydicom"] -enterprise = ["libdeeplake (==0.0.55)", "pyjwt"] +enterprise = ["libdeeplake (==0.0.56)", "pyjwt"] gcp = ["google-auth (>=2.0.1,<2.1.0)", "google-auth-oauthlib (>=0.4.5,<0.5.0)", "google-cloud-storage (>=1.42.0,<1.43.0)"] gdrive = ["google-api-python-client (>=2.31.0,<2.32.0)", "google-auth (>=2.0.1,<2.1.0)", "google-auth-oauthlib (>=0.4.5,<0.5.0)", "oauth2client (>=4.1.3,<4.2.0)"] medical = ["nibabel", "pydicom"] @@ -5305,13 +5305,13 @@ twitter = ["twython"] [[package]] name = "nomic" -version = "1.1.6" +version = "1.1.14" description = "The offical Nomic python client." 
category = "main" optional = true python-versions = "*" files = [ - {file = "nomic-1.1.6.tar.gz", hash = "sha256:8be61aeeb9d5f4f591bfb5655c9ae54a12de97498e6d2e24cf22faf9c118cf81"}, + {file = "nomic-1.1.14.tar.gz", hash = "sha256:7980516131a125988cea47d7390063d90a9f5bdc6d6063574b6bfbb9897a7202"}, ] [package.dependencies] @@ -6023,58 +6023,21 @@ tests = ["pytest", "pytest-cov", "pytest-pep8"] [[package]] name = "orjson" -version = "3.9.0" +version = "3.9.1" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" category = "main" optional = true python-versions = ">=3.7" files = [ - {file = "orjson-3.9.0-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:128b1cd0f00a37ba64a12cceeba4e8070655d4400edd55a737513ee663c1ed5a"}, - {file = "orjson-3.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a3693fde44b2eeb80074ecbe8c504b25baf71e66c080af2a574193a5ba81960"}, - {file = "orjson-3.9.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3f1193417b5a93deb41bcb8db27b61179b9b3e299b337b578c31f19159664da3"}, - {file = "orjson-3.9.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:88626d898c408450c57664899831cf072787898af4847fa4466607ad2a83f454"}, - {file = "orjson-3.9.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e3bde77c1e0061eb34bae6fea44818b2198e043ee10a16ad7b160921fee26ea"}, - {file = "orjson-3.9.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45df5bf6531ffda518331cc93cdcd4c84f4a4a0507d72af8fb698c7131a440a0"}, - {file = "orjson-3.9.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2536a7f30fd4d77532769ea9285cd20c69bd2b40acf980de94bbc79b1c6fad5a"}, - {file = "orjson-3.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:21f6a6fdfbc13cd715c61e9fa9daeff732df6401ab7d6a2ebad0042313a40bd1"}, - {file = "orjson-3.9.0-cp310-none-win_amd64.whl", hash = "sha256:46c9733330b75c116438f555c0b971a2388b5f502e2dd4ec3bf6bacb96f82741"}, - {file = "orjson-3.9.0-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:47d7e4a3effc0e9314bd5b06e7431f2490a5e64dcdcbbc4d60e713786fec327d"}, - {file = "orjson-3.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c41d1ef6ec308e9e3701764b3de889ed8c1c126eceaea881dd1027bffbed89fe"}, - {file = "orjson-3.9.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:86da00836029b2a071229c8aecab998a2f316c1bc7de10ae020d7311de3a6d0d"}, - {file = "orjson-3.9.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4fcf598bd5a99a94caa7ec92ce657939f12491e4753ea7e4d6c03faf5f7912e"}, - {file = "orjson-3.9.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:09522937479bd39d5bb32d11a5ecdf6926fda43ac2cbde21cc1a9508b4e4ea29"}, - {file = "orjson-3.9.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2fbf34667a8be48ec89d5ef479a00d4e7b3acda62d722c97377702da0c30ffd"}, - {file = "orjson-3.9.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:edd77183c154cbedaa6dac32fee9cb770b04e2a7f367a5864f444578554cc946"}, - {file = "orjson-3.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2af7dff1c7ddb0c83eb5773acf6566b153f8cd32e4ba782ae9ccd6d0f324efd3"}, - {file = "orjson-3.9.0-cp311-none-win_amd64.whl", hash = "sha256:44fa74b497e608a8cdca1ee37fe3533a30f17163c7e2872ab1b854900cf0dfcf"}, - {file = 
"orjson-3.9.0-cp37-cp37m-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:f6476e2487c0b7387187de15e5b8f6635c29b75934f2e689ca8cad6550439f3d"}, - {file = "orjson-3.9.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7b241c3229084035b38cac9b5c96b43644da829da41d9d5be0fefb96fb116e1"}, - {file = "orjson-3.9.0-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d414fd0678e949779104f5b307f0f9fac861728e19d3cdde66759af77f892da0"}, - {file = "orjson-3.9.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a1fcddcabe121e393f3c4a31ed6d3535214d42a4ece0f9dde2e250006d6a58d"}, - {file = "orjson-3.9.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd89d63707ac616462832bfc5d16fa0c12483f86add2432ce55c8710c9531c03"}, - {file = "orjson-3.9.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c50654e4870805e4b1a587c2c3c5ef2f36f3e67fc463a738339ff40d65f7db1"}, - {file = "orjson-3.9.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:721d47dffedb7795ffea8a06f2de7d192de7b58e085cf357a99abf0eb931f2c3"}, - {file = "orjson-3.9.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9de2129d40674007cb24164939e075b5b39fee768bf20801e08c0e3283bfb18e"}, - {file = "orjson-3.9.0-cp37-none-win_amd64.whl", hash = "sha256:5afd22847b07b63f2b8fcfddd5b7a6f47c5aaa25e19b97a3d6d39508b8fd465a"}, - {file = "orjson-3.9.0-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:d4c2d31178e3027affd98eead033f1c406890df83a0ca2016604cc21f722a1d1"}, - {file = "orjson-3.9.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebe372e9f4e4f0335b7b4ebfab991b3734371e3d5b7f989ca3baa5da25185f4a"}, - {file = "orjson-3.9.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c4949fc1304b702197c0840882e84b86d8d5ca33c3d945cc60727bc1786c2b20"}, - {file = "orjson-3.9.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:748c1e8df0b0880c63d323e167ad17ab4db2e1178a40902c2fcb68cbe402d7c8"}, - {file = "orjson-3.9.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6ab80b60195f166a9d666b2eaf6d2c74202b6da2a1fb4b4d66b9cc0ce5c9957"}, - {file = "orjson-3.9.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e44ebe2129d43c5a48f3affa3fa59c6484ed16faf5b00486add1061a95384ab0"}, - {file = "orjson-3.9.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:04e61db09ff155846b69d07cf5aa21001f2010ea669ec3169c1fbad9c9e40cd5"}, - {file = "orjson-3.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c68af71b1110820c914f9df75842895b5528ff524d3286fde57097b2b5ed8f22"}, - {file = "orjson-3.9.0-cp38-none-win_amd64.whl", hash = "sha256:3a208d0bca609de3152eb8320d5093ad9c52979332f626c13500d1645c66bf8d"}, - {file = "orjson-3.9.0-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a901c432828c191332d75f358142736c433d4a192f7794123e1d30d68193de86"}, - {file = "orjson-3.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:271b6f1018757fc6bca40ae72e6cdb6cf84584dde2d1e5eaac30e387a13d9e72"}, - {file = "orjson-3.9.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:949698bdddb1daff986d73e6bbe6cd68833cd80c4adc6b69fafbd46634d4672c"}, - {file = "orjson-3.9.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:108c58d2c7648c991f82f9b2217c50981ad7cf6aaee3efbfaa9d807e49cd69b8"}, - {file = "orjson-3.9.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:08cb43569198c1f5c89ecafcbfc62414f6115d894ff908d8cf8e5e24801364e6"}, - {file = "orjson-3.9.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09ee828572fadcd58bf356d2c1bad99a95c7c9c1f182b407abbc7dec1810f542"}, - {file = "orjson-3.9.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0e7fe5d603ee9177ff2e45858b4fc47fea2da0688f23d9773654889d56dfbc82"}, - {file = "orjson-3.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9ee5f1ba82146a50d61fb58d310a37c0f406eda898172f9c98673b5d6f9461c3"}, - {file = "orjson-3.9.0-cp39-none-win_amd64.whl", hash = "sha256:3235c31d0fe674f6e3433e9ddfed212aa840c83a9b6ef5ae128950e2c808c303"}, - {file = "orjson-3.9.0.tar.gz", hash = "sha256:f6dd27c71cd6e146795f876449a8eae74f67ae1e4e244dfc1203489103eb2d94"}, + {file = "orjson-3.9.1-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c4434b7b786fdc394b95d029fb99949d7c2b05bbd4bf5cb5e3906be96ffeee3b"}, + {file = "orjson-3.9.1-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:761b6efd33c49de20dd73ce64cc59da62c0dab10aa6015f582680e0663cc792c"}, + {file = "orjson-3.9.1-cp311-none-win_amd64.whl", hash = "sha256:0b53b5f72cf536dd8aa4fc4c95e7e09a7adb119f8ff8ee6cc60f735d7740ad6a"}, + {file = "orjson-3.9.1-cp37-cp37m-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:d4b68d01a506242316a07f1d2f29fb0a8b36cee30a7c35076f1ef59dce0890c1"}, + {file = "orjson-3.9.1-cp37-none-win_amd64.whl", hash = "sha256:6d173d3921dd58a068c88ec22baea7dbc87a137411501618b1292a9d6252318e"}, + {file = "orjson-3.9.1-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:d1c2b0b4246c992ce2529fc610a446b945f1429445ece1c1f826a234c829a918"}, + {file = "orjson-3.9.1-cp38-none-win_amd64.whl", hash = "sha256:402f9d3edfec4560a98880224ec10eba4c5f7b4791e4bc0d4f4d8df5faf2a006"}, + {file = "orjson-3.9.1-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:49c0d78dcd34626e2e934f1192d7c052b94e0ecadc5f386fd2bda6d2e03dadf5"}, + {file = "orjson-3.9.1-cp39-none-win_amd64.whl", hash = "sha256:48a27da6c7306965846565cc385611d03382bbd84120008653aa2f6741e2105d"}, ] [[package]] @@ -7085,48 +7048,48 @@ files = [ [[package]] name = "pydantic" -version = "1.10.8" +version = "1.10.9" description = "Data validation and settings management using python type hints" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1243d28e9b05003a89d72e7915fdb26ffd1d39bdd39b00b7dbe4afae4b557f9d"}, - {file = "pydantic-1.10.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0ab53b609c11dfc0c060d94335993cc2b95b2150e25583bec37a49b2d6c6c3f"}, - {file = "pydantic-1.10.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9613fadad06b4f3bc5db2653ce2f22e0de84a7c6c293909b48f6ed37b83c61f"}, - {file = "pydantic-1.10.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df7800cb1984d8f6e249351139667a8c50a379009271ee6236138a22a0c0f319"}, - {file = "pydantic-1.10.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0c6fafa0965b539d7aab0a673a046466d23b86e4b0e8019d25fd53f4df62c277"}, - {file = "pydantic-1.10.8-cp310-cp310-musllinux_1_1_x86_64.whl", 
hash = "sha256:e82d4566fcd527eae8b244fa952d99f2ca3172b7e97add0b43e2d97ee77f81ab"}, - {file = "pydantic-1.10.8-cp310-cp310-win_amd64.whl", hash = "sha256:ab523c31e22943713d80d8d342d23b6f6ac4b792a1e54064a8d0cf78fd64e800"}, - {file = "pydantic-1.10.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:666bdf6066bf6dbc107b30d034615d2627e2121506c555f73f90b54a463d1f33"}, - {file = "pydantic-1.10.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:35db5301b82e8661fa9c505c800d0990bc14e9f36f98932bb1d248c0ac5cada5"}, - {file = "pydantic-1.10.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f90c1e29f447557e9e26afb1c4dbf8768a10cc676e3781b6a577841ade126b85"}, - {file = "pydantic-1.10.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93e766b4a8226e0708ef243e843105bf124e21331694367f95f4e3b4a92bbb3f"}, - {file = "pydantic-1.10.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:88f195f582851e8db960b4a94c3e3ad25692c1c1539e2552f3df7a9e972ef60e"}, - {file = "pydantic-1.10.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:34d327c81e68a1ecb52fe9c8d50c8a9b3e90d3c8ad991bfc8f953fb477d42fb4"}, - {file = "pydantic-1.10.8-cp311-cp311-win_amd64.whl", hash = "sha256:d532bf00f381bd6bc62cabc7d1372096b75a33bc197a312b03f5838b4fb84edd"}, - {file = "pydantic-1.10.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7d5b8641c24886d764a74ec541d2fc2c7fb19f6da2a4001e6d580ba4a38f7878"}, - {file = "pydantic-1.10.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b1f6cb446470b7ddf86c2e57cd119a24959af2b01e552f60705910663af09a4"}, - {file = "pydantic-1.10.8-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c33b60054b2136aef8cf190cd4c52a3daa20b2263917c49adad20eaf381e823b"}, - {file = "pydantic-1.10.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1952526ba40b220b912cdc43c1c32bcf4a58e3f192fa313ee665916b26befb68"}, - {file = "pydantic-1.10.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:bb14388ec45a7a0dc429e87def6396f9e73c8c77818c927b6a60706603d5f2ea"}, - {file = "pydantic-1.10.8-cp37-cp37m-win_amd64.whl", hash = "sha256:16f8c3e33af1e9bb16c7a91fc7d5fa9fe27298e9f299cff6cb744d89d573d62c"}, - {file = "pydantic-1.10.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1ced8375969673929809d7f36ad322934c35de4af3b5e5b09ec967c21f9f7887"}, - {file = "pydantic-1.10.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:93e6bcfccbd831894a6a434b0aeb1947f9e70b7468f274154d03d71fabb1d7c6"}, - {file = "pydantic-1.10.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:191ba419b605f897ede9892f6c56fb182f40a15d309ef0142212200a10af4c18"}, - {file = "pydantic-1.10.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:052d8654cb65174d6f9490cc9b9a200083a82cf5c3c5d3985db765757eb3b375"}, - {file = "pydantic-1.10.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ceb6a23bf1ba4b837d0cfe378329ad3f351b5897c8d4914ce95b85fba96da5a1"}, - {file = "pydantic-1.10.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f2e754d5566f050954727c77f094e01793bcb5725b663bf628fa6743a5a9108"}, - {file = "pydantic-1.10.8-cp38-cp38-win_amd64.whl", hash = "sha256:6a82d6cda82258efca32b40040228ecf43a548671cb174a1e81477195ed3ed56"}, - {file = "pydantic-1.10.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3e59417ba8a17265e632af99cc5f35ec309de5980c440c255ab1ca3ae96a3e0e"}, - {file = 
"pydantic-1.10.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:84d80219c3f8d4cad44575e18404099c76851bc924ce5ab1c4c8bb5e2a2227d0"}, - {file = "pydantic-1.10.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e4148e635994d57d834be1182a44bdb07dd867fa3c2d1b37002000646cc5459"}, - {file = "pydantic-1.10.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12f7b0bf8553e310e530e9f3a2f5734c68699f42218bf3568ef49cd9b0e44df4"}, - {file = "pydantic-1.10.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:42aa0c4b5c3025483240a25b09f3c09a189481ddda2ea3a831a9d25f444e03c1"}, - {file = "pydantic-1.10.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:17aef11cc1b997f9d574b91909fed40761e13fac438d72b81f902226a69dac01"}, - {file = "pydantic-1.10.8-cp39-cp39-win_amd64.whl", hash = "sha256:66a703d1983c675a6e0fed8953b0971c44dba48a929a2000a493c3772eb61a5a"}, - {file = "pydantic-1.10.8-py3-none-any.whl", hash = "sha256:7456eb22ed9aaa24ff3e7b4757da20d9e5ce2a81018c1b3ebd81a0b88a18f3b2"}, - {file = "pydantic-1.10.8.tar.gz", hash = "sha256:1410275520dfa70effadf4c21811d755e7ef9bb1f1d077a21958153a92c8d9ca"}, + {file = "pydantic-1.10.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e692dec4a40bfb40ca530e07805b1208c1de071a18d26af4a2a0d79015b352ca"}, + {file = "pydantic-1.10.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c52eb595db83e189419bf337b59154bdcca642ee4b2a09e5d7797e41ace783f"}, + {file = "pydantic-1.10.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:939328fd539b8d0edf244327398a667b6b140afd3bf7e347cf9813c736211896"}, + {file = "pydantic-1.10.9-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b48d3d634bca23b172f47f2335c617d3fcb4b3ba18481c96b7943a4c634f5c8d"}, + {file = "pydantic-1.10.9-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:f0b7628fb8efe60fe66fd4adadd7ad2304014770cdc1f4934db41fe46cc8825f"}, + {file = "pydantic-1.10.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e1aa5c2410769ca28aa9a7841b80d9d9a1c5f223928ca8bec7e7c9a34d26b1d4"}, + {file = "pydantic-1.10.9-cp310-cp310-win_amd64.whl", hash = "sha256:eec39224b2b2e861259d6f3c8b6290d4e0fbdce147adb797484a42278a1a486f"}, + {file = "pydantic-1.10.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d111a21bbbfd85c17248130deac02bbd9b5e20b303338e0dbe0faa78330e37e0"}, + {file = "pydantic-1.10.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e9aec8627a1a6823fc62fb96480abe3eb10168fd0d859ee3d3b395105ae19a7"}, + {file = "pydantic-1.10.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07293ab08e7b4d3c9d7de4949a0ea571f11e4557d19ea24dd3ae0c524c0c334d"}, + {file = "pydantic-1.10.9-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ee829b86ce984261d99ff2fd6e88f2230068d96c2a582f29583ed602ef3fc2c"}, + {file = "pydantic-1.10.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4b466a23009ff5cdd7076eb56aca537c745ca491293cc38e72bf1e0e00de5b91"}, + {file = "pydantic-1.10.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7847ca62e581e6088d9000f3c497267868ca2fa89432714e21a4fb33a04d52e8"}, + {file = "pydantic-1.10.9-cp311-cp311-win_amd64.whl", hash = "sha256:7845b31959468bc5b78d7b95ec52fe5be32b55d0d09983a877cca6aedc51068f"}, + {file = "pydantic-1.10.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:517a681919bf880ce1dac7e5bc0c3af1e58ba118fd774da2ffcd93c5f96eaece"}, + {file = 
"pydantic-1.10.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67195274fd27780f15c4c372f4ba9a5c02dad6d50647b917b6a92bf00b3d301a"}, + {file = "pydantic-1.10.9-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2196c06484da2b3fded1ab6dbe182bdabeb09f6318b7fdc412609ee2b564c49a"}, + {file = "pydantic-1.10.9-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6257bb45ad78abacda13f15bde5886efd6bf549dd71085e64b8dcf9919c38b60"}, + {file = "pydantic-1.10.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3283b574b01e8dbc982080d8287c968489d25329a463b29a90d4157de4f2baaf"}, + {file = "pydantic-1.10.9-cp37-cp37m-win_amd64.whl", hash = "sha256:5f8bbaf4013b9a50e8100333cc4e3fa2f81214033e05ac5aa44fa24a98670a29"}, + {file = "pydantic-1.10.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9cd67fb763248cbe38f0593cd8611bfe4b8ad82acb3bdf2b0898c23415a1f82"}, + {file = "pydantic-1.10.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f50e1764ce9353be67267e7fd0da08349397c7db17a562ad036aa7c8f4adfdb6"}, + {file = "pydantic-1.10.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73ef93e5e1d3c8e83f1ff2e7fdd026d9e063c7e089394869a6e2985696693766"}, + {file = "pydantic-1.10.9-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:128d9453d92e6e81e881dd7e2484e08d8b164da5507f62d06ceecf84bf2e21d3"}, + {file = "pydantic-1.10.9-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ad428e92ab68798d9326bb3e5515bc927444a3d71a93b4a2ca02a8a5d795c572"}, + {file = "pydantic-1.10.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fab81a92f42d6d525dd47ced310b0c3e10c416bbfae5d59523e63ea22f82b31e"}, + {file = "pydantic-1.10.9-cp38-cp38-win_amd64.whl", hash = "sha256:963671eda0b6ba6926d8fc759e3e10335e1dc1b71ff2a43ed2efd6996634dafb"}, + {file = "pydantic-1.10.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:970b1bdc6243ef663ba5c7e36ac9ab1f2bfecb8ad297c9824b542d41a750b298"}, + {file = "pydantic-1.10.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7e1d5290044f620f80cf1c969c542a5468f3656de47b41aa78100c5baa2b8276"}, + {file = "pydantic-1.10.9-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83fcff3c7df7adff880622a98022626f4f6dbce6639a88a15a3ce0f96466cb60"}, + {file = "pydantic-1.10.9-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0da48717dc9495d3a8f215e0d012599db6b8092db02acac5e0d58a65248ec5bc"}, + {file = "pydantic-1.10.9-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0a2aabdc73c2a5960e87c3ffebca6ccde88665616d1fd6d3db3178ef427b267a"}, + {file = "pydantic-1.10.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9863b9420d99dfa9c064042304868e8ba08e89081428a1c471858aa2af6f57c4"}, + {file = "pydantic-1.10.9-cp39-cp39-win_amd64.whl", hash = "sha256:e7c9900b43ac14110efa977be3da28931ffc74c27e96ee89fbcaaf0b0fe338e1"}, + {file = "pydantic-1.10.9-py3-none-any.whl", hash = "sha256:6cafde02f6699ce4ff643417d1a9223716ec25e228ddc3b436fe7e2d25a1f305"}, + {file = "pydantic-1.10.9.tar.gz", hash = "sha256:95c70da2cd3b6ddf3b9645ecaa8d98f3d80c606624b6d245558d202cd23ea3be"}, ] [package.dependencies] @@ -7213,15 +7176,16 @@ tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"] [[package]] name = "pylance" -version = "0.4.19" +version = "0.4.20" description = "python wrapper for lance-rs" category = "main" optional = true python-versions = ">=3.8" files = [ - {file = 
"pylance-0.4.19-cp38-abi3-macosx_10_15_x86_64.whl", hash = "sha256:7f5dbf2c384de6c499a2d876be4aec5cbb8f8ef65d490ac93fbd84251ae61e7c"}, - {file = "pylance-0.4.19-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:b2125d50ef28af11b5473e72f82ce9af30756ae655b982582c58267b72216043"}, - {file = "pylance-0.4.19-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f98fc8cbc5f66d35ea7701eae691c195df4e45cca8c8f33d9e1a71f24077d7a6"}, + {file = "pylance-0.4.20-cp38-abi3-macosx_10_15_x86_64.whl", hash = "sha256:fe5cc47721f1a45c5069e32e236538e6f464d644351a1a0236352b3c5700f729"}, + {file = "pylance-0.4.20-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:6e783fc2dcaadb62f4307cd6d504d7be8d17474c61d79a7dfd078afef6e8cb8a"}, + {file = "pylance-0.4.20-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b9fa8550db9eb52126ee19ac039027d7eb6d0d6849b0c43db9fbb69eebe8ab6"}, + {file = "pylance-0.4.20-cp38-abi3-win_amd64.whl", hash = "sha256:d09375070ee5d4e59c6cfac74f2a10e7907127a0317115b08097547318ec9efa"}, ] [package.dependencies] @@ -8454,6 +8418,67 @@ botocore = ">=1.12.36,<2.0a.0" [package.extras] crt = ["botocore[crt] (>=1.20.29,<2.0a.0)"] +[[package]] +name = "safetensors" +version = "0.3.1" +description = "Fast and Safe Tensor serialization" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "safetensors-0.3.1-cp310-cp310-macosx_10_11_x86_64.whl", hash = "sha256:2ae9b7dd268b4bae6624729dac86deb82104820e9786429b0583e5168db2f770"}, + {file = "safetensors-0.3.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:08c85c1934682f1e2cd904d38433b53cd2a98245a7cc31f5689f9322a2320bbf"}, + {file = "safetensors-0.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba625c7af9e1c5d0d91cb83d2fba97d29ea69d4db2015d9714d24c7f6d488e15"}, + {file = "safetensors-0.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b57d5890c619ec10d9f1b6426b8690d0c9c2868a90dc52f13fae6f6407ac141f"}, + {file = "safetensors-0.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c9f562ea696d50b95cadbeb1716dc476714a87792ffe374280c0835312cbfe2"}, + {file = "safetensors-0.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c115951b3a865ece8d98ee43882f2fd0a999c0200d6e6fec24134715ebe3b57"}, + {file = "safetensors-0.3.1-cp310-cp310-win32.whl", hash = "sha256:118f8f7503ea312fc7af27e934088a1b589fb1eff5a7dea2cd1de6c71ee33391"}, + {file = "safetensors-0.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:54846eaae25fded28a7bebbb66be563cad221b4c80daee39e2f55df5e5e0266f"}, + {file = "safetensors-0.3.1-cp311-cp311-macosx_10_11_universal2.whl", hash = "sha256:5af82e10946c4822506db0f29269f43147e889054704dde994d4e22f0c37377b"}, + {file = "safetensors-0.3.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:626c86dd1d930963c8ea7f953a3787ae85322551e3a5203ac731d6e6f3e18f44"}, + {file = "safetensors-0.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12e30677e6af1f4cc4f2832546e91dbb3b0aa7d575bfa473d2899d524e1ace08"}, + {file = "safetensors-0.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d534b80bc8d39945bb902f34b0454773971fe9e5e1f2142af451759d7e52b356"}, + {file = "safetensors-0.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ddd0ddd502cf219666e7d30f23f196cb87e829439b52b39f3e7da7918c3416df"}, + {file = "safetensors-0.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:997a2cc14023713f423e6d16536d55cb16a3d72850f142e05f82f0d4c76d383b"}, + {file = "safetensors-0.3.1-cp311-cp311-win32.whl", hash = "sha256:6ae9ca63d9e22f71ec40550207bd284a60a6b4916ae6ca12c85a8d86bf49e0c3"}, + {file = "safetensors-0.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:62aa7421ca455418423e35029524489480adda53e3f702453580180ecfebe476"}, + {file = "safetensors-0.3.1-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:6d54b3ed367b6898baab75dfd057c24f36ec64d3938ffff2af981d56bfba2f42"}, + {file = "safetensors-0.3.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:262423aeda91117010f8c607889066028f680fbb667f50cfe6eae96f22f9d150"}, + {file = "safetensors-0.3.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10efe2513a8327fd628cea13167089588acc23093ba132aecfc536eb9a4560fe"}, + {file = "safetensors-0.3.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:689b3d6a7ebce70ee9438267ee55ea89b575c19923876645e927d08757b552fe"}, + {file = "safetensors-0.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14cd9a87bc73ce06903e9f8ee8b05b056af6f3c9f37a6bd74997a16ed36ff5f4"}, + {file = "safetensors-0.3.1-cp37-cp37m-win32.whl", hash = "sha256:a77cb39624480d5f143c1cc272184f65a296f573d61629eff5d495d2e0541d3e"}, + {file = "safetensors-0.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:9eff3190bfbbb52eef729911345c643f875ca4dbb374aa6c559675cfd0ab73db"}, + {file = "safetensors-0.3.1-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:05cbfef76e4daa14796db1bbb52072d4b72a44050c368b2b1f6fd3e610669a89"}, + {file = "safetensors-0.3.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:c49061461f4a81e5ec3415070a3f135530834c89cbd6a7db7cd49e3cb9d9864b"}, + {file = "safetensors-0.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22cf7e73ca42974f098ce0cf4dd8918983700b6b07a4c6827d50c8daefca776e"}, + {file = "safetensors-0.3.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:04f909442d6223ff0016cd2e1b2a95ef8039b92a558014627363a2e267213f62"}, + {file = "safetensors-0.3.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c573c5a0d5d45791ae8c179e26d74aff86e719056591aa7edb3ca7be55bc961"}, + {file = "safetensors-0.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6994043b12e717cf2a6ba69077ac41f0d3675b2819734f07f61819e854c622c7"}, + {file = "safetensors-0.3.1-cp38-cp38-win32.whl", hash = "sha256:158ede81694180a0dbba59422bc304a78c054b305df993c0c6e39c6330fa9348"}, + {file = "safetensors-0.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:afdc725beff7121ea8d39a7339f5a6abcb01daa189ea56290b67fe262d56e20f"}, + {file = "safetensors-0.3.1-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:cba910fcc9e5e64d32d62b837388721165e9c7e45d23bc3a38ad57694b77f40d"}, + {file = "safetensors-0.3.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:a4f7dbfe7285573cdaddd85ef6fa84ebbed995d3703ab72d71257944e384612f"}, + {file = "safetensors-0.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54aed0802f9eaa83ca7b1cbb986bfb90b8e2c67b6a4bcfe245627e17dad565d4"}, + {file = "safetensors-0.3.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:34b75a766f3cfc99fd4c33e329b76deae63f5f388e455d863a5d6e99472fca8e"}, + {file = "safetensors-0.3.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a0f31904f35dc14919a145b2d7a2d8842a43a18a629affe678233c4ea90b4af"}, + {file = 
"safetensors-0.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcf527ecc5f58907fd9031510378105487f318cc91ecdc5aee3c7cc8f46030a8"}, + {file = "safetensors-0.3.1-cp39-cp39-win32.whl", hash = "sha256:e2f083112cf97aa9611e2a05cc170a2795eccec5f6ff837f4565f950670a9d83"}, + {file = "safetensors-0.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:5f4f614b8e8161cd8a9ca19c765d176a82b122fa3d3387b77862145bfe9b4e93"}, + {file = "safetensors-0.3.1.tar.gz", hash = "sha256:571da56ff8d0bec8ae54923b621cda98d36dcef10feb36fd492c4d0c2cd0e869"}, +] + +[package.extras] +all = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "flax (>=0.6.3)", "h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "isort (>=5.5.4)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "numpy (>=1.21.6)", "paddlepaddle (>=2.4.1)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)", "tensorflow (>=2.11.0)", "torch (>=1.10)"] +dev = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "flax (>=0.6.3)", "h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "isort (>=5.5.4)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)", "numpy (>=1.21.6)", "paddlepaddle (>=2.4.1)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)", "tensorflow (>=2.11.0)", "torch (>=1.10)"] +jax = ["flax (>=0.6.3)", "jax (>=0.3.25)", "jaxlib (>=0.3.25)"] +numpy = ["numpy (>=1.21.6)"] +paddlepaddle = ["paddlepaddle (>=2.4.1)"] +quality = ["black (==22.3)", "click (==8.0.4)", "flake8 (>=3.8.3)", "isort (>=5.5.4)"] +tensorflow = ["tensorflow (>=2.11.0)"] +testing = ["h5py (>=3.7.0)", "huggingface-hub (>=0.12.1)", "numpy (>=1.21.6)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "setuptools-rust (>=1.5.2)"] +torch = ["torch (>=1.10)"] + [[package]] name = "scikit-learn" version = "1.2.2" @@ -9332,14 +9357,14 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyam [[package]] name = "steamship" -version = "2.17.6" +version = "2.17.7" description = "The fastest way to add language AI to your product." 
category = "main" optional = true python-versions = "*" files = [ - {file = "steamship-2.17.6-py3-none-any.whl", hash = "sha256:7d25db57f19d228f82ce445e15ace66b2a2e3ac25307d69c4828c27026e8e44c"}, - {file = "steamship-2.17.6.tar.gz", hash = "sha256:ff3af96d7f0eef2036c222513af51ac356f0753123a7bf44bdb31eccd2d957df"}, + {file = "steamship-2.17.7-py3-none-any.whl", hash = "sha256:3b0092d3e810ca33b70964088557a9836a8625b29f211b8949f4c91a9bea9ab2"}, + {file = "steamship-2.17.7.tar.gz", hash = "sha256:3644b25fe6a3345c2480ed0bab851faef8c00e01d7893da82e9d910900c9bf4c"}, ] [package.dependencies] @@ -10103,14 +10128,14 @@ test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"] [[package]] name = "transformers" -version = "4.29.2" +version = "4.30.0" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" category = "main" optional = false python-versions = ">=3.7.0" files = [ - {file = "transformers-4.29.2-py3-none-any.whl", hash = "sha256:0ef158b99bad6f4e6652a0d8655fbbe58b4cb788ce7040f320b5d29c7c810a75"}, - {file = "transformers-4.29.2.tar.gz", hash = "sha256:ed9467661f459f1ce49461d83f18f3b36b6a37f306182dc2ba272935f3b93ebb"}, + {file = "transformers-4.30.0-py3-none-any.whl", hash = "sha256:e90e9fc05310985f3ede2da278d11c91656b4a354b4935c54604f57409299aae"}, + {file = "transformers-4.30.0.tar.gz", hash = "sha256:478e1709738237aa1b7bae1fd0ba7bd9d44352fe45972df7ed060077257e84f9"}, ] [package.dependencies] @@ -10121,28 +10146,29 @@ packaging = ">=20.0" pyyaml = ">=5.1" regex = "!=2019.12.17" requests = "*" +safetensors = ">=0.3.1" tokenizers = ">=0.11.1,<0.11.3 || >0.11.3,<0.14" tqdm = ">=4.27" [package.extras] -accelerate = ["accelerate (>=0.19.0)"] -agents = ["Pillow", "accelerate (>=0.19.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch (>=1.9,!=1.12.0)"] -all = ["Pillow", "accelerate (>=0.19.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.6.9)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "numba (<0.57.0)", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf (<=3.20.2)", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision"] -audio = ["kenlm", "librosa", "numba (<0.57.0)", "phonemizer", "pyctcdecode (>=0.4.0)"] +accelerate = ["accelerate (>=0.20.2)"] +agents = ["Pillow", "accelerate (>=0.20.2)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch (>=1.9,!=1.12.0)"] +all = ["Pillow", "accelerate (>=0.20.2)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.6.9)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf (<=3.20.3)", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision"] +audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] codecarbon = ["codecarbon (==1.2.0)"] -deepspeed = ["accelerate (>=0.19.0)", "deepspeed (>=0.8.3)"] -deepspeed-testing = ["GitPython 
(<3.1.19)", "accelerate (>=0.19.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.8.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf (<=3.20.2)", "psutil", "pytest", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "safetensors (>=0.2.1)", "sentencepiece (>=0.1.91,!=0.1.92)", "timeout-decorator"] -dev = ["GitPython (<3.1.19)", "Pillow", "accelerate (>=0.19.0)", "av (==9.2.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.6.9)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "numba (<0.57.0)", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf (<=3.20.2)", "psutil", "pyctcdecode (>=0.4.0)", "pytest", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "safetensors (>=0.2.1)", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] -dev-tensorflow = ["GitPython (<3.1.19)", "Pillow", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "numba (<0.57.0)", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf (<=3.20.2)", "psutil", "pyctcdecode (>=0.4.0)", "pytest", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "safetensors (>=0.2.1)", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "urllib3 (<2.0.0)"] -dev-torch = ["GitPython (<3.1.19)", "Pillow", "accelerate (>=0.19.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "numba (<0.57.0)", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf (<=3.20.2)", "psutil", "pyctcdecode (>=0.4.0)", "pytest", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "safetensors (>=0.2.1)", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", 
"sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] -docs = ["Pillow", "accelerate (>=0.19.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.6.9)", "hf-doc-builder", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "numba (<0.57.0)", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf (<=3.20.2)", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision"] +deepspeed = ["accelerate (>=0.20.2)", "deepspeed (>=0.8.3)"] +deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.20.2)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.8.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf (<=3.20.3)", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "timeout-decorator"] +dev = ["GitPython (<3.1.19)", "Pillow", "accelerate (>=0.20.2)", "av (==9.2.0)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.6.9)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf (<=3.20.3)", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +dev-tensorflow = ["GitPython (<3.1.19)", "Pillow", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf (<=3.20.3)", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", 
"timeout-decorator", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "urllib3 (<2.0.0)"] +dev-torch = ["GitPython (<3.1.19)", "Pillow", "accelerate (>=0.20.2)", "beautifulsoup4", "black (>=23.1,<24.0)", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf (<=3.20.3)", "psutil", "pyctcdecode (>=0.4.0)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune]", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (>=0.0.241,<=0.0.259)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "timeout-decorator", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +docs = ["Pillow", "accelerate (>=0.20.2)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.6.9)", "hf-doc-builder", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf (<=3.20.3)", "pyctcdecode (>=0.4.0)", "ray[tune]", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx", "timm", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "torchaudio", "torchvision"] docs-specific = ["hf-doc-builder"] fairscale = ["fairscale (>0.3)"] flax = ["flax (>=0.4.1,<=0.6.9)", "jax (>=0.2.8,!=0.3.2,<=0.3.6)", "jaxlib (>=0.1.65,<=0.3.6)", "optax (>=0.0.8,<=0.1.4)"] -flax-speech = ["kenlm", "librosa", "numba (<0.57.0)", "phonemizer", "pyctcdecode (>=0.4.0)"] +flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] ftfy = ["ftfy"] integrations = ["optuna", "ray[tune]", "sigopt"] -ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"] +ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"] modelcreation = ["cookiecutter (==1.7.3)"] natten = ["natten (>=0.14.6)"] onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] @@ -10152,21 +10178,21 @@ quality = ["GitPython (<3.1.19)", "black (>=23.1,<24.0)", "datasets (!=2.5.0)", ray = ["ray[tune]"] retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] sagemaker = ["sagemaker (>=2.31.0)"] -sentencepiece = ["protobuf (<=3.20.2)", "sentencepiece (>=0.1.91,!=0.1.92)"] +sentencepiece = ["protobuf (<=3.20.3)", "sentencepiece (>=0.1.91,!=0.1.92)"] serving = ["fastapi", "pydantic", "starlette", "uvicorn"] sigopt = ["sigopt"] sklearn = ["scikit-learn"] -speech = ["kenlm", "librosa", "numba (<0.57.0)", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] -testing = ["GitPython (<3.1.19)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf (<=3.20.2)", "psutil", "pytest", 
"pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "safetensors (>=0.2.1)", "timeout-decorator"] +speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +testing = ["GitPython (<3.1.19)", "beautifulsoup4", "black (>=23.1,<24.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf (<=3.20.3)", "psutil", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "timeout-decorator"] tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx"] tf-cpu = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>=2.4,<2.13)", "tensorflow-text (<2.13)", "tf2onnx"] -tf-speech = ["kenlm", "librosa", "numba (<0.57.0)", "phonemizer", "pyctcdecode (>=0.4.0)"] +tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] timm = ["timm"] tokenizers = ["tokenizers (>=0.11.1,!=0.11.3,<0.14)"] -torch = ["accelerate (>=0.19.0)", "torch (>=1.9,!=1.12.0)"] -torch-speech = ["kenlm", "librosa", "numba (<0.57.0)", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] +torch = ["accelerate (>=0.20.2)", "torch (>=1.9,!=1.12.0)"] +torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] torch-vision = ["Pillow", "torchvision"] -torchhub = ["filelock", "huggingface-hub (>=0.14.1,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf (<=3.20.2)", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "tqdm (>=4.27)"] +torchhub = ["filelock", "huggingface-hub (>=0.14.1,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf (<=3.20.3)", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.11.1,!=0.11.3,<0.14)", "torch (>=1.9,!=1.12.0)", "tqdm (>=4.27)"] video = ["av (==9.2.0)", "decord (==0.6.0)"] vision = ["Pillow"] @@ -10649,14 +10675,14 @@ files = [ [[package]] name = "websocket-client" -version = "1.5.2" +version = "1.5.3" description = "WebSocket client for Python with low level API options" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "websocket-client-1.5.2.tar.gz", hash = "sha256:c7d67c13b928645f259d9b847ab5b57fd2d127213ca41ebd880de1f553b7c23b"}, - {file = "websocket_client-1.5.2-py3-none-any.whl", hash = "sha256:f8c64e28cd700e7ba1f04350d66422b6833b82a796b525a51e740b8cc8dab4b1"}, + {file = "websocket-client-1.5.3.tar.gz", hash = "sha256:b96f3bce3e54e3486ebe6504bc22bd4c140392bd2eb71764db29be8f2639aa65"}, + {file = "websocket_client-1.5.3-py3-none-any.whl", hash = "sha256:3566f8467cd350874c4913816355642a4942f6c1ed1e9406e3d42fae6d6c072a"}, ] [package.extras] @@ -10746,14 +10772,14 @@ files = [ [[package]] name = "werkzeug" -version = "2.3.4" +version = "2.3.6" description = "The comprehensive WSGI web application library." 
category = "main" optional = true python-versions = ">=3.8" files = [ - {file = "Werkzeug-2.3.4-py3-none-any.whl", hash = "sha256:48e5e61472fee0ddee27ebad085614ebedb7af41e88f687aaf881afb723a162f"}, - {file = "Werkzeug-2.3.4.tar.gz", hash = "sha256:1d5a58e0377d1fe39d061a5de4469e414e78ccb1e1e59c0f5ad6fa1c36c52b76"}, + {file = "Werkzeug-2.3.6-py3-none-any.whl", hash = "sha256:935539fa1413afbb9195b24880778422ed620c0fc09670945185cce4d91a8890"}, + {file = "Werkzeug-2.3.6.tar.gz", hash = "sha256:98c774df2f91b05550078891dee5f0eb0cb797a522c757a2452b9cee5b202330"}, ] [package.dependencies] @@ -11340,13 +11366,13 @@ cffi = {version = ">=1.11", markers = "platform_python_implementation == \"PyPy\ cffi = ["cffi (>=1.11)"] [extras] -all = ["O365", "aleph-alpha-client", "anthropic", "arxiv", "atlassian-python-api", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "azure-cosmos", "azure-identity", "beautifulsoup4", "clickhouse-connect", "cohere", "deeplake", "docarray", "duckduckgo-search", "elasticsearch", "faiss-cpu", "google-api-python-client", "google-auth", "google-search-results", "gptcache", "html2text", "huggingface_hub", "jina", "jinja2", "jq", "lancedb", "langkit", "lark", "lxml", "manifest-ml", "momento", "nebula3-python", "neo4j", "networkx", "nlpcloud", "nltk", "nomic", "openai", "openlm", "opensearch-py", "pdfminer-six", "pexpect", "pgvector", "pinecone-client", "pinecone-text", "psycopg2-binary", "pymongo", "pyowm", "pypdf", "pytesseract", "pyvespa", "qdrant-client", "redis", "requests-toolbelt", "sentence-transformers", "singlestoredb", "spacy", "steamship", "tensorflow-text", "tigrisdb", "tiktoken", "torch", "transformers", "weaviate-client", "wikipedia", "wolframalpha"] -azure = ["azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "azure-core", "azure-cosmos", "azure-identity", "openai"] +all = ["anthropic", "cohere", "openai", "nlpcloud", "huggingface_hub", "jina", "manifest-ml", "elasticsearch", "opensearch-py", "google-search-results", "faiss-cpu", "sentence-transformers", "transformers", "spacy", "nltk", "wikipedia", "beautifulsoup4", "tiktoken", "torch", "jinja2", "pinecone-client", "pinecone-text", "pymongo", "weaviate-client", "redis", "google-api-python-client", "google-auth", "wolframalpha", "qdrant-client", "tensorflow-text", "pypdf", "networkx", "nomic", "aleph-alpha-client", "deeplake", "pgvector", "psycopg2-binary", "pyowm", "pytesseract", "html2text", "atlassian-python-api", "gptcache", "duckduckgo-search", "arxiv", "azure-identity", "clickhouse-connect", "azure-cosmos", "lancedb", "langkit", "lark", "pexpect", "pyvespa", "O365", "jq", "docarray", "steamship", "pdfminer-six", "lxml", "requests-toolbelt", "neo4j", "openlm", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "momento", "singlestoredb", "tigrisdb", "nebula3-python"] +azure = ["azure-identity", "azure-cosmos", "openai", "azure-core", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech"] cohere = ["cohere"] docarray = ["docarray"] embeddings = ["sentence-transformers"] -extended-testing = ["atlassian-python-api", "beautifulsoup4", "beautifulsoup4", "bibtexparser", "chardet", "gql", "html2text", "jq", "lxml", "pandas", "pdfminer-six", "psychicapi", "py-trello", "pymupdf", "pypdf", "pypdfium2", "pyspark", "requests-toolbelt", "scikit-learn", "telethon", "tqdm", "zep-python"] -llms = ["anthropic", "cohere", "huggingface_hub", "manifest-ml", "nlpcloud", "openai", "openlm", "torch", "transformers"] 
+extended-testing = ["beautifulsoup4", "bibtexparser", "chardet", "jq", "pdfminer-six", "pypdf", "pymupdf", "pypdfium2", "tqdm", "lxml", "atlassian-python-api", "beautifulsoup4", "pandas", "telethon", "psychicapi", "zep-python", "gql", "requests-toolbelt", "html2text", "py-trello", "scikit-learn", "pyspark"] +llms = ["anthropic", "cohere", "openai", "openlm", "nlpcloud", "huggingface_hub", "manifest-ml", "torch", "transformers"] openai = ["openai", "tiktoken"] qdrant = ["qdrant-client"] text-helpers = ["chardet"] From 7af186fddf75d60c2e17b1af7c639e69412da2fc Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Fri, 9 Jun 2023 09:15:53 -0700 Subject: [PATCH 02/46] fixes to docs (#5919) --- docs/modules/indexes/document_loaders.rst | 2 ++ .../document_loaders/examples/snowflake.ipynb | 13 +++++++++++-- langchain/document_loaders/__init__.py | 2 ++ langchain/document_loaders/snowflake_loader.py | 4 +--- 4 files changed, 16 insertions(+), 5 deletions(-) diff --git a/docs/modules/indexes/document_loaders.rst b/docs/modules/indexes/document_loaders.rst index 8848463b82e58..190abbef2442e 100644 --- a/docs/modules/indexes/document_loaders.rst +++ b/docs/modules/indexes/document_loaders.rst @@ -116,6 +116,7 @@ We need access tokens and sometime other parameters to get access to these datas ./document_loaders/examples/discord_loader.ipynb ./document_loaders/examples/docugami.ipynb ./document_loaders/examples/duckdb.ipynb + ./document_loaders/examples/fauna.ipynb ./document_loaders/examples/figma.ipynb ./document_loaders/examples/gitbook.ipynb ./document_loaders/examples/git.ipynb @@ -137,6 +138,7 @@ We need access tokens and sometime other parameters to get access to these datas ./document_loaders/examples/reddit.ipynb ./document_loaders/examples/roam.ipynb ./document_loaders/examples/slack.ipynb + ./document_loaders/examples/snowflake.ipynb ./document_loaders/examples/spreedly.ipynb ./document_loaders/examples/stripe.ipynb ./document_loaders/examples/tomarkdown.ipynb diff --git a/docs/modules/indexes/document_loaders/examples/snowflake.ipynb b/docs/modules/indexes/document_loaders/examples/snowflake.ipynb index f84c2e5468327..7e6cfa042f7e6 100644 --- a/docs/modules/indexes/document_loaders/examples/snowflake.ipynb +++ b/docs/modules/indexes/document_loaders/examples/snowflake.ipynb @@ -1,5 +1,14 @@ { "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Snowflake\n", + "\n", + "This notebooks goes over how to load documents from Snowflake" + ] + }, { "cell_type": "code", "execution_count": null, @@ -16,7 +25,7 @@ "outputs": [], "source": [ "import settings as s\n", - "from langchain.document_loaders.snowflake_loader import SnowflakeLoader" + "from langchain.document_loaders import SnowflakeLoader" ] }, { @@ -81,7 +90,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.16" + "version": "3.9.1" } }, "nbformat": 4, diff --git a/langchain/document_loaders/__init__.py b/langchain/document_loaders/__init__.py index 84a825062b947..393062b7a5db0 100644 --- a/langchain/document_loaders/__init__.py +++ b/langchain/document_loaders/__init__.py @@ -90,6 +90,7 @@ from langchain.document_loaders.s3_file import S3FileLoader from langchain.document_loaders.sitemap import SitemapLoader from langchain.document_loaders.slack_directory import SlackDirectoryLoader +from langchain.document_loaders.snowflake_loader import SnowflakeLoader from langchain.document_loaders.spreedly import SpreedlyLoader from langchain.document_loaders.srt import 
SRTLoader from langchain.document_loaders.stripe import StripeLoader @@ -244,4 +245,5 @@ "WhatsAppChatLoader", "WikipediaLoader", "YoutubeLoader", + "SnowflakeLoader", ] diff --git a/langchain/document_loaders/snowflake_loader.py b/langchain/document_loaders/snowflake_loader.py index 30978fed12225..115f6e7b9eb38 100644 --- a/langchain/document_loaders/snowflake_loader.py +++ b/langchain/document_loaders/snowflake_loader.py @@ -2,8 +2,6 @@ from typing import Any, Dict, Iterator, List, Optional, Tuple -from snowflake.connector import DictCursor - from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader @@ -61,6 +59,7 @@ def __init__( def _execute_query(self) -> List[Dict[str, Any]]: try: import snowflake.connector + from snowflake.connector import DictCursor except ImportError as ex: raise ValueError( "Could not import snowflake-connector-python package. " @@ -77,7 +76,6 @@ def _execute_query(self) -> List[Dict[str, Any]]: schema=self.schema, parameters=self.parameters, ) - query_result = [] try: cur = conn.cursor(DictCursor) cur.execute("USE DATABASE " + self.database) From 3678cba0bef7e538fafee0d094ace4633c14fff9 Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Fri, 9 Jun 2023 09:17:08 -0700 Subject: [PATCH 03/46] bump ver to 195 (#5949) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 9aa7ff3fb897d..f8bfe4ef37ba1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain" -version = "0.0.194" +version = "0.0.195" description = "Building applications with LLMs through composability" authors = [] license = "MIT" From f3e7ac0a2c0ad677e91571f59b03b55c5af52db2 Mon Sep 17 00:00:00 2001 From: Lance Martin <122662504+rlancemartin@users.noreply.github.com> Date: Fri, 9 Jun 2023 11:27:29 -0700 Subject: [PATCH 04/46] Add load() to snowflake loader (#5956) Quick fix for the recently added [snowflake data loader](https://github.com/hwchase17/langchain/pull/5825/files). --- langchain/document_loaders/snowflake_loader.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/langchain/document_loaders/snowflake_loader.py b/langchain/document_loaders/snowflake_loader.py index 115f6e7b9eb38..b76a7426096a3 100644 --- a/langchain/document_loaders/snowflake_loader.py +++ b/langchain/document_loaders/snowflake_loader.py @@ -118,3 +118,6 @@ def lazy_load(self) -> Iterator[Document]: metadata = {k: v for k, v in row.items() if k in metadata_columns} doc = Document(page_content=page_content, metadata=metadata) yield doc + + def load(self) -> List[Document]: + return list(self.lazy_load()) From 736a1819aa4fe2773f7e33c13043d5cebe947c1b Mon Sep 17 00:00:00 2001 From: German Martin Date: Sat, 10 Jun 2023 12:41:02 -0300 Subject: [PATCH 05/46] LOTR: Lord of the Retrievers. A retriever that merges several retrievers together, applying document_formatters to them. (#5798) "One Retriever to merge them all, One Retriever to expose them, One Retriever to bring them all and process them with Document formatters." Hi @dev2049! Here bothering people again! I'm using this simple idea to deal with merging the output of several retrievers into one. I'm aware of DocumentCompressorPipeline and ContextualCompressionRetriever, but I don't think they allow us to do something like this. I was also having trouble getting the pipeline to work. Please correct me if I'm wrong.
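A minimal sketch of the intended usage (hypothetical: `retriever_a` and `retriever_b` are placeholders for any existing `BaseRetriever` instances, e.g. obtained via `as_retriever()` on two different vector stores):

```python
from langchain.retrievers.merger_retriever import MergerRetriever

# retriever_a and retriever_b are assumed to already exist, e.g. built
# from two different vector stores or embedding models.
lotr = MergerRetriever(retrievers=[retriever_a, retriever_b])

# Results are interleaved round-robin across the underlying retrievers.
docs = lotr.get_relevant_documents("Tell me about the Celtics")
```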
This allows doing some sort of "retrieval" preprocessing and then using the retriever with the curated results anywhere you could use a retriever. My use case is to generate different indexes with different embeddings and sources for richer results, then filter them with one or many document formatters. I saw some people looking for something like this here: https://github.com/hwchase17/langchain/issues/3991 and something similar here: https://github.com/hwchase17/langchain/issues/5555 This is just a proposal; I know I'm missing tests, etc. If you think this is a worthwhile idea, I can work on tests and anything you want to change. Let me know! --------- Co-authored-by: Harrison Chase --- .../examples/merger_retriever.ipynb | 121 ++++++++++++++++++ langchain/retrievers/__init__.py | 2 + langchain/retrievers/merger_retriever.py | 110 ++++++++++++++++ .../retrievers/test_merger_retriever.py | 32 +++++ 4 files changed, 265 insertions(+) create mode 100644 docs/modules/indexes/retrievers/examples/merger_retriever.ipynb create mode 100644 langchain/retrievers/merger_retriever.py create mode 100644 tests/integration_tests/retrievers/test_merger_retriever.py diff --git a/docs/modules/indexes/retrievers/examples/merger_retriever.ipynb b/docs/modules/indexes/retrievers/examples/merger_retriever.ipynb new file mode 100644 index 0000000000000..0919dceec0a8e --- /dev/null +++ b/docs/modules/indexes/retrievers/examples/merger_retriever.ipynb @@ -0,0 +1,121 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "fc0db1bc", + "metadata": {}, + "source": [ + "# LOTR (Merger Retriever)\n", + "\n", + "Lord of the Retrievers, also known as MergerRetriever, takes a list of retrievers as input and merges the results of their get_relevant_documents() methods into a single list. The merged results will be a list of documents that are relevant to the query and that have been ranked by the different retrievers.\n", + "\n", + "The MergerRetriever class can be used to improve the accuracy of document retrieval in a number of ways. First, it can combine the results of multiple retrievers, which can help to reduce the risk of bias in the results. Second, it can rank the results of the different retrievers, which can help to ensure that the most relevant documents are returned first."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9fbcc58f", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import chromadb\n", + "from langchain.retrievers.merger_retriever import MergerRetriever\n", + "from langchain.vectorstores import Chroma\n", + "from langchain.embeddings import HuggingFaceEmbeddings\n", + "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain.document_transformers import EmbeddingsRedundantFilter\n", + "from langchain.retrievers.document_compressors import DocumentCompressorPipeline\n", + "from langchain.retrievers import ContextualCompressionRetriever\n", + "\n", + "# Get 3 different embeddings.\n", + "all_mini = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")\n", + "multi_qa_mini = HuggingFaceEmbeddings(model_name=\"multi-qa-MiniLM-L6-dot-v1\")\n", + "filter_embeddings = OpenAIEmbeddings()\n", + "\n", + "# __file__ is not defined in a notebook; use the working directory instead.\n", + "ABS_PATH = os.getcwd()\n", + "DB_DIR = os.path.join(ABS_PATH, \"db\")\n", + "\n", + "# Instantiate 2 different Chroma indexes, each one with a different embedding.\n", + "client_settings = chromadb.config.Settings(\n", + " chroma_db_impl=\"duckdb+parquet\",\n", + " persist_directory=DB_DIR,\n", + " anonymized_telemetry=False,\n", + ")\n", + "db_all = Chroma(\n", + " collection_name=\"project_store_all\",\n", + " persist_directory=DB_DIR,\n", + " client_settings=client_settings,\n", + " embedding_function=all_mini,\n", + ")\n", + "db_multi_qa = Chroma(\n", + " collection_name=\"project_store_multi\",\n", + " persist_directory=DB_DIR,\n", + " client_settings=client_settings,\n", + " embedding_function=multi_qa_mini,\n", + ")\n", + "\n", + "# Define 2 different retrievers with 2 different embeddings and different search types.\n", + "retriever_all = db_all.as_retriever(\n", + " search_type=\"similarity\", search_kwargs={\"k\": 5, \"include_metadata\": True}\n", + ")\n", + "retriever_multi_qa = db_multi_qa.as_retriever(\n", + " search_type=\"mmr\", search_kwargs={\"k\": 5, \"include_metadata\": True}\n", + ")\n", + "\n", + "# The Lord of the Retrievers will hold the output of both retrievers and can be used as any other \n", + "# retriever on different types of chains.\n", + "lotr = MergerRetriever(retrievers=[retriever_all, retriever_multi_qa])\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "c152339d", + "metadata": {}, + "source": [ + "## Remove redundant results from the merged retrievers." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "039faea6", + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# We can remove redundant results from both retrievers using yet another embedding.
\n", + "# Using multiples embeddings in diff steps could help reduce biases.\n", + "filter = EmbeddingsRedundantFilter(embeddings=filter_embeddings)\n", + "pipeline = DocumentCompressorPipeline(transformers=[filter])\n", + "compression_retriever = ContextualCompressionRetriever(\n", + " base_compressor=pipeline, base_retriever=lotr\n", + ")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/langchain/retrievers/__init__.py b/langchain/retrievers/__init__.py index bb3d2eac469e7..19b67f924520c 100644 --- a/langchain/retrievers/__init__.py +++ b/langchain/retrievers/__init__.py @@ -6,6 +6,7 @@ from langchain.retrievers.databerry import DataberryRetriever from langchain.retrievers.elastic_search_bm25 import ElasticSearchBM25Retriever from langchain.retrievers.knn import KNNRetriever +from langchain.retrievers.merger_retriever import MergerRetriever from langchain.retrievers.metal import MetalRetriever from langchain.retrievers.pinecone_hybrid_search import PineconeHybridSearchRetriever from langchain.retrievers.pupmed import PubMedRetriever @@ -31,6 +32,7 @@ "DataberryRetriever", "ElasticSearchBM25Retriever", "KNNRetriever", + "MergerRetriever", "MetalRetriever", "PineconeHybridSearchRetriever", "RemoteLangChainRetriever", diff --git a/langchain/retrievers/merger_retriever.py b/langchain/retrievers/merger_retriever.py new file mode 100644 index 0000000000000..a9dccdc4ba551 --- /dev/null +++ b/langchain/retrievers/merger_retriever.py @@ -0,0 +1,110 @@ +from typing import List + +from langchain.schema import BaseRetriever, Document + + +class MergerRetriever(BaseRetriever): + """ + This class merges the results of multiple retrievers. + + Args: + retrievers: A list of retrievers to merge. + """ + + def __init__( + self, + retrievers: List[BaseRetriever], + ): + """ + Initialize the MergerRetriever class. + + Args: + retrievers: A list of retrievers to merge. + """ + + self.retrievers = retrievers + + def get_relevant_documents(self, query: str) -> List[Document]: + """ + Get the relevant documents for a given query. + + Args: + query: The query to search for. + + Returns: + A list of relevant documents. + """ + + # Merge the results of the retrievers. + merged_documents = self.merge_documents(query) + + return merged_documents + + async def aget_relevant_documents(self, query: str) -> List[Document]: + """ + Asynchronously get the relevant documents for a given query. + + Args: + query: The query to search for. + + Returns: + A list of relevant documents. + """ + + # Merge the results of the retrievers. + merged_documents = await self.amerge_documents(query) + + return merged_documents + + def merge_documents(self, query: str) -> List[Document]: + """ + Merge the results of the retrievers. + + Args: + query: The query to search for. + + Returns: + A list of merged documents. + """ + + # Get the results of all retrievers. + retriever_docs = [ + retriever.get_relevant_documents(query) for retriever in self.retrievers + ] + + # Merge the results of the retrievers. 
+ merged_documents = [] + max_docs = max((len(docs) for docs in retriever_docs), default=0) + for i in range(max_docs): + for docs in retriever_docs: + if i < len(docs): + merged_documents.append(docs[i]) + + return merged_documents + + async def amerge_documents(self, query: str) -> List[Document]: + """ + Asynchronously merge the results of the retrievers. + + Args: + query: The query to search for. + + Returns: + A list of merged documents. + """ + + # Get the results of all retrievers. + retriever_docs = [ + await retriever.aget_relevant_documents(query) + for retriever in self.retrievers + ] + + # Merge the results of the retrievers. + merged_documents = [] + max_docs = max((len(docs) for docs in retriever_docs), default=0) + for i in range(max_docs): + for docs in retriever_docs: + if i < len(docs): + merged_documents.append(docs[i]) + + return merged_documents diff --git a/tests/integration_tests/retrievers/test_merger_retriever.py b/tests/integration_tests/retrievers/test_merger_retriever.py new file mode 100644 index 0000000000000..f42f66447839c --- /dev/null +++ b/tests/integration_tests/retrievers/test_merger_retriever.py @@ -0,0 +1,32 @@ +from langchain.embeddings import OpenAIEmbeddings +from langchain.retrievers.merger_retriever import MergerRetriever +from langchain.vectorstores import Chroma + + +def test_merger_retriever_get_relevant_docs() -> None: + """Test get_relevant_docs.""" + texts_group_a = [ + "This is a document about the Boston Celtics", + "Fly me to the moon is one of my favourite songs.", + "I simply love going to the movies", + ] + texts_group_b = [ + "This is a document about the Phoenix Suns", + "The Boston Celtics won the game by 20 points", + "Real stupidity beats artificial intelligence every time. TP", + ] + embeddings = OpenAIEmbeddings() + retriever_a = Chroma.from_texts(texts_group_a, embedding=embeddings).as_retriever( + search_kwargs={"k": 1} + ) + retriever_b = Chroma.from_texts(texts_group_b, embedding=embeddings).as_retriever( + search_kwargs={"k": 1} + ) + + # The Lord of the Retrievers. + lotr = MergerRetriever([retriever_a, retriever_b]) + + actual = lotr.get_relevant_documents("Tell me about the Celtics") + assert len(actual) == 2 + assert texts_group_a[0] in [d.page_content for d in actual] + assert texts_group_b[1] in [d.page_content for d in actual] From 62ec10a7f5110b3b264c3851fc98d784848f7259 Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Sat, 10 Jun 2023 09:06:35 -0700 Subject: [PATCH 06/46] bump version to 196 (#5988) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index f8bfe4ef37ba1..f0f68ae3a4e7f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain" -version = "0.0.195" +version = "0.0.196" description = "Building applications with LLMs through composability" authors = [] license = "MIT" From d6f5d0c6b10f50147432a24bcb076135ab2c8f00 Mon Sep 17 00:00:00 2001 From: Kaarthik Andavar Date: Sun, 11 Jun 2023 08:03:50 +1200 Subject: [PATCH 07/46] Fix: SnowflakeLoader returning empty documents (#5967) **Fix SnowflakeLoader's Behavior of Returning Empty Documents** **Description:** This PR addresses the issue where the SnowflakeLoader was consistently returning empty documents. After investigation, it was found that the query method within the SnowflakeLoader was not properly fetching and processing the data. **Changes:** 1.
Modified the query method in SnowflakeLoader to handle data fetch and processing more accurately. 2. Enhanced error handling within the SnowflakeLoader to catch and log potential issues that may arise during data loading. **Impact:** This fix will ensure the SnowflakeLoader reliably returns the expected documents instead of empty ones, improving the efficiency and reliability of data processing tasks in the LangChain project. Before Fix: `[ Document(page_content='', metadata={}), Document(page_content='', metadata={}), Document(page_content='', metadata={}), Document(page_content='', metadata={}), Document(page_content='', metadata={}), Document(page_content='', metadata={}), Document(page_content='', metadata={}), Document(page_content='', metadata={}), Document(page_content='', metadata={}), Document(page_content='', metadata={}) ]` After Fix: `[Document(page_content='CUSTOMER_ID: 1\nFIRST_NAME: John\nLAST_NAME: Doe\nEMAIL: john.doe@example.com\nPHONE: 555-123-4567\nADDRESS: 123 Elm St, San Francisco, CA 94102', metadata={}), Document(page_content='CUSTOMER_ID: 2\nFIRST_NAME: Jane\nLAST_NAME: Doe\nEMAIL: jane.doe@example.com\nPHONE: 555-987-6543\nADDRESS: 456 Oak St, San Francisco, CA 94103', metadata={}), Document(page_content='CUSTOMER_ID: 3\nFIRST_NAME: Michael\nLAST_NAME: Smith\nEMAIL: michael.smith@example.com\nPHONE: 555-234-5678\nADDRESS: 789 Pine St, San Francisco, CA 94104', metadata={}), Document(page_content='CUSTOMER_ID: 4\nFIRST_NAME: Emily\nLAST_NAME: Johnson\nEMAIL: emily.johnson@example.com\nPHONE: 555-345-6789\nADDRESS: 321 Maple St, San Francisco, CA 94105', metadata={}), Document(page_content='CUSTOMER_ID: 5\nFIRST_NAME: David\nLAST_NAME: Williams\nEMAIL: david.williams@example.com\nPHONE: 555-456-7890\nADDRESS: 654 Birch St, San Francisco, CA 94106', metadata={}), Document(page_content='CUSTOMER_ID: 6\nFIRST_NAME: Emma\nLAST_NAME: Jones\nEMAIL: emma.jones@example.com\nPHONE: 555-567-8901\nADDRESS: 987 Cedar St, San Francisco, CA 94107', metadata={}), Document(page_content='CUSTOMER_ID: 7\nFIRST_NAME: Oliver\nLAST_NAME: Brown\nEMAIL: oliver.brown@example.com\nPHONE: 555-678-9012\nADDRESS: 147 Cherry St, San Francisco, CA 94108', metadata={}), Document(page_content='CUSTOMER_ID: 8\nFIRST_NAME: Sophia\nLAST_NAME: Davis\nEMAIL: sophia.davis@example.com\nPHONE: 555-789-0123\nADDRESS: 369 Walnut St, San Francisco, CA 94109', metadata={}), Document(page_content='CUSTOMER_ID: 9\nFIRST_NAME: James\nLAST_NAME: Taylor\nEMAIL: james.taylor@example.com\nPHONE: 555-890-1234\nADDRESS: 258 Hawthorn St, San Francisco, CA 94110', metadata={}), Document(page_content='CUSTOMER_ID: 10\nFIRST_NAME: Isabella\nLAST_NAME: Wilson\nEMAIL: isabella.wilson@example.com\nPHONE: 555-901-2345\nADDRESS: 963 Aspen St, San Francisco, CA 94111', metadata={})] ` **Tests:** All unit and integration tests have been run and passed successfully. Additional tests were added to validate the new behavior of the SnowflakeLoader. **Checklist:** - [x] Code changes are covered by tests - [x] Code passes `make format` and `make lint` - [x] This PR does not introduce any breaking changes Please review and let me know if any changes are required. 
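For reference, the core of the new fetch logic is roughly the following (simplified sketch, assuming an open Snowflake connection `conn` and a `query` string):

```python
cur = conn.cursor()
cur.execute(query)
rows = cur.fetchall()

# cursor.description holds one metadata tuple per result column;
# its first element is the column name.
column_names = [column[0] for column in cur.description]

# Build one dict per row, keyed by column name.
query_result = [dict(zip(column_names, row)) for row in rows]
```

This replaces the previous `DictCursor`-based fetch, which also lower-cased all result keys.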
--- langchain/document_loaders/snowflake_loader.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/langchain/document_loaders/snowflake_loader.py b/langchain/document_loaders/snowflake_loader.py index b76a7426096a3..59164124dc08c 100644 --- a/langchain/document_loaders/snowflake_loader.py +++ b/langchain/document_loaders/snowflake_loader.py @@ -53,13 +53,14 @@ def __init__( self.database = database self.schema = schema self.parameters = parameters - self.page_content_columns = page_content_columns - self.metadata_columns = metadata_columns + self.page_content_columns = ( + page_content_columns if page_content_columns is not None else ["*"] + ) + self.metadata_columns = metadata_columns if metadata_columns is not None else [] def _execute_query(self) -> List[Dict[str, Any]]: try: import snowflake.connector - from snowflake.connector import DictCursor except ImportError as ex: raise ValueError( "Could not import snowflake-connector-python package. " @@ -77,14 +78,13 @@ def _execute_query(self) -> List[Dict[str, Any]]: parameters=self.parameters, ) try: - cur = conn.cursor(DictCursor) + cur = conn.cursor() cur.execute("USE DATABASE " + self.database) cur.execute("USE SCHEMA " + self.schema) cur.execute(self.query, self.parameters) query_result = cur.fetchall() - query_result = [ - {k.lower(): v for k, v in item.items()} for item in query_result - ] + column_names = [column[0] for column in cur.description] + query_result = [dict(zip(column_names, row)) for row in query_result] except Exception as e: print(f"An error occurred: {e}") query_result = [] @@ -111,6 +111,8 @@ def lazy_load(self) -> Iterator[Document]: print(f"An error occurred during the query: {query_result}") return [] page_content_columns, metadata_columns = self._get_columns(query_result) + if "*" in page_content_columns: + page_content_columns = list(query_result[0].keys()) for row in query_result: page_content = "\n".join( f"{k}: {v}" for k, v in row.items() if k in page_content_columns @@ -120,4 +122,5 @@ def lazy_load(self) -> Iterator[Document]: yield doc def load(self) -> List[Document]: + """Load data into document objects.""" return list(self.lazy_load()) From 5f356b99939125a8c52cd0f370f1d1ff4aaa5f53 Mon Sep 17 00:00:00 2001 From: constDave Date: Sat, 10 Jun 2023 16:31:58 -0500 Subject: [PATCH 08/46] Fixed typo missing "use" (#5991) --- docs/modules/indexes/retrievers/examples/vectorstore.ipynb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/modules/indexes/retrievers/examples/vectorstore.ipynb b/docs/modules/indexes/retrievers/examples/vectorstore.ipynb index 73cc0220a8f31..424d1276b9f4b 100644 --- a/docs/modules/indexes/retrievers/examples/vectorstore.ipynb +++ b/docs/modules/indexes/retrievers/examples/vectorstore.ipynb @@ -99,13 +99,14 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "2d958271", "metadata": {}, "source": [ "## Similarity Score Threshold Retrieval\n", "\n", - "You can also a retrieval method that sets a similarity score threshold and only returns documents with a score above that threshold" + "You can also use a retrieval method that sets a similarity score threshold and only returns documents with a score above that threshold" ] }, { From ca1afa72138910d41d0e0e7f2c3e641dbfc3bb4a Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Sat, 10 Jun 2023 14:37:26 -0700 Subject: [PATCH 09/46] add test for structured tools (#5989) --- tests/unit_tests/tools/test_base.py | 32 +++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) 
diff --git a/tests/unit_tests/tools/test_base.py b/tests/unit_tests/tools/test_base.py index 6638a095317b5..cea017d79d271 100644 --- a/tests/unit_tests/tools/test_base.py +++ b/tests/unit_tests/tools/test_base.py @@ -556,3 +556,35 @@ async def test_async_exception_handling_non_tool_exception() -> None: _tool = _FakeExceptionTool(exception=ValueError()) with pytest.raises(ValueError): await _tool.arun({}) + + +def test_structured_tool_from_function() -> None: + """Test that structured tools can be created from functions.""" + + def foo(bar: int, baz: str) -> str: + """Docstring + Args: + bar: int + baz: str + """ + raise NotImplementedError() + + structured_tool = StructuredTool.from_function(foo) + assert structured_tool.name == "foo" + assert structured_tool.args == { + "bar": {"title": "Bar", "type": "integer"}, + "baz": {"title": "Baz", "type": "string"}, + } + + assert structured_tool.args_schema.schema() == { + "properties": { + "bar": {"title": "Bar", "type": "integer"}, + "baz": {"title": "Baz", "type": "string"}, + }, + "title": "fooSchemaSchema", + "type": "object", + } + + prefix = "foo(bar: int, baz: str) -> str - " + assert foo.__doc__ is not None + assert structured_tool.description == prefix + foo.__doc__.strip() From 0ca37e613c7ff3e0518f7a15bc53aa1fee006689 Mon Sep 17 00:00:00 2001 From: Daniel Grittner Date: Sat, 10 Jun 2023 23:38:20 +0200 Subject: [PATCH 10/46] Fix handling of missing action & input for async MRKL agent (#5985) Hi, this is a fix for https://github.com/hwchase17/langchain/pull/5014. That PR forgot to add the ability to self-solve the ValueError(f"Could not parse LLM output: {llm_output}") error for `_atake_next_step`. --- langchain/agents/agent.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/langchain/agents/agent.py b/langchain/agents/agent.py index a1da5e7a24c51..0056d10fbce19 100644 --- a/langchain/agents/agent.py +++ b/langchain/agents/agent.py @@ -864,7 +864,11 @@ async def _atake_next_step( raise e text = str(e) if isinstance(self.handle_parsing_errors, bool): - observation = "Invalid or incomplete response" + if e.send_to_llm: + observation = str(e.observation) + text = str(e.llm_output) + else: + observation = "Invalid or incomplete response" elif isinstance(self.handle_parsing_errors, str): observation = self.handle_parsing_errors elif callable(self.handle_parsing_errors): From d5819a7ca728c01f0b3ef9fba493ff964990ce54 Mon Sep 17 00:00:00 2001 From: Tomaz Bratanic Date: Sat, 10 Jun 2023 23:39:55 +0200 Subject: [PATCH 11/46] Add additional parameters to Graph Cypher Chain (#5979) Taking inspiration from the SQL chain, the following three parameters are added to the Graph Cypher Chain.
- top_k: Limits the number of results from the database to be used as context - return_direct: Returns database results without transforming them to natural language - return_intermediate_steps: Returns intermediate steps along with the final answer --- .../chains/examples/graph_cypher_qa.ipynb | 178 +++++++++++++++++- langchain/chains/graph_qa/cypher.py | 48 +++++- langchain/graphs/neo4j_graph.py | 3 +- .../chains/test_graph_database.py | 110 +++++++++++ 4 files changed, 322 insertions(+), 17 deletions(-) diff --git a/docs/modules/chains/examples/graph_cypher_qa.ipynb b/docs/modules/chains/examples/graph_cypher_qa.ipynb index b93bf64ee70a5..a36aafb0743f5 100644 --- a/docs/modules/chains/examples/graph_cypher_qa.ipynb +++ b/docs/modules/chains/examples/graph_cypher_qa.ipynb @@ -177,7 +177,7 @@ "\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie {name: 'Top Gun'})\n", "RETURN a.name\u001b[0m\n", "Full Context:\n", - "\u001b[32;1m\u001b[1;3m[{'a.name': 'Tom Cruise'}, {'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}]\u001b[0m\n", + "\u001b[32;1m\u001b[1;3m[{'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}, {'a.name': 'Tom Cruise'}]\u001b[0m\n", "\n", "\u001b[1m> Finished chain.\u001b[0m\n" ] }, { "data": { "text/plain": [ - "'Tom Cruise, Val Kilmer, Anthony Edwards, and Meg Ryan played in Top Gun.'" + "'Val Kilmer, Anthony Edwards, Meg Ryan, and Tom Cruise played in Top Gun.'" ] }, "execution_count": 7, @@ -197,10 +197,180 @@ "chain.run(\"Who played in Top Gun?\")" ] }, + { + "cell_type": "markdown", + "id": "2d28c4df", + "metadata": {}, + "source": [ + "## Limit the number of results\n", + "You can limit the number of results from the Cypher QA Chain using the `top_k` parameter.\n", + "The default is 10."
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "df230946", + "metadata": {}, + "outputs": [], + "source": [ + "chain = GraphCypherQAChain.from_llm(\n", + " ChatOpenAI(temperature=0), graph=graph, verbose=True, top_k=2\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "3f1600ee", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n", + "Generated Cypher:\n", + "\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie {name: 'Top Gun'})\n", + "RETURN a.name\u001b[0m\n", + "Full Context:\n", + "\u001b[32;1m\u001b[1;3m[{'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}]\u001b[0m\n", + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n" + ] + }, + { + "data": { + "text/plain": [ + "'Val Kilmer and Anthony Edwards played in Top Gun.'" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "chain.run(\"Who played in Top Gun?\")" + ] + }, + { + "cell_type": "markdown", + "id": "88c16206", + "metadata": {}, + "source": [ + "## Return intermediate results\n", + "You can return intermediate steps from the Cypher QA Chain using the `return_intermediate_steps` parameter" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "e412f36b", + "metadata": {}, + "outputs": [], + "source": [ + "chain = GraphCypherQAChain.from_llm(\n", + " ChatOpenAI(temperature=0), graph=graph, verbose=True, return_intermediate_steps=True\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "4f4699dc", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n", + "Generated Cypher:\n", + "\u001b[32;1m\u001b[1;3mMATCH (a:Actor)-[:ACTED_IN]->(m:Movie {name: 'Top Gun'})\n", + "RETURN a.name\u001b[0m\n", + "Full Context:\n", + "\u001b[32;1m\u001b[1;3m[{'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}, {'a.name': 'Tom Cruise'}]\u001b[0m\n", + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n", + "Intermediate steps: [{'query': \"MATCH (a:Actor)-[:ACTED_IN]->(m:Movie {name: 'Top Gun'})\\nRETURN a.name\"}, {'context': [{'a.name': 'Val Kilmer'}, {'a.name': 'Anthony Edwards'}, {'a.name': 'Meg Ryan'}, {'a.name': 'Tom Cruise'}]}]\n", + "Final answer: Val Kilmer, Anthony Edwards, Meg Ryan, and Tom Cruise played in Top Gun.\n" + ] + } + ], + "source": [ + "result = chain(\"Who played in Top Gun?\")\n", + "print(f\"Intermediate steps: {result['intermediate_steps']}\")\n", + "print(f\"Final answer: {result['result']}\")" + ] + }, + { + "cell_type": "markdown", + "id": "d6e1b054", + "metadata": {}, + "source": [ + "## Return direct results\n", + "You can return direct results from the Cypher QA Chain using the `return_direct` parameter" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "2d3acf10", + "metadata": {}, + "outputs": [], + "source": [ + "chain = GraphCypherQAChain.from_llm(\n", + " ChatOpenAI(temperature=0), graph=graph, verbose=True, return_direct=True\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "b0a9d143", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new GraphCypherQAChain chain...\u001b[0m\n", + "Generated Cypher:\n", + "\u001b[32;1m\u001b[1;3mMATCH 
(a:Actor)-[:ACTED_IN]->(m:Movie {name: 'Top Gun'})\n", + "RETURN a.name\u001b[0m\n", + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n" + ] + }, + { + "data": { + "text/plain": [ + "[{'a.name': 'Val Kilmer'},\n", + " {'a.name': 'Anthony Edwards'},\n", + " {'a.name': 'Meg Ryan'},\n", + " {'a.name': 'Tom Cruise'}]" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "chain.run(\"Who played in Top Gun?\")" + ] + }, { "cell_type": "code", "execution_count": null, - "id": "b4825316", + "id": "74d0a36f", "metadata": {}, "outputs": [], "source": [] @@ -222,7 +392,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.1" + "version": "3.8.8" } }, "nbformat": 4, diff --git a/langchain/chains/graph_qa/cypher.py b/langchain/chains/graph_qa/cypher.py index b18e5d361d4e1..39d124c7584b5 100644 --- a/langchain/chains/graph_qa/cypher.py +++ b/langchain/chains/graph_qa/cypher.py @@ -14,6 +14,8 @@ from langchain.graphs.neo4j_graph import Neo4jGraph from langchain.prompts.base import BasePromptTemplate +INTERMEDIATE_STEPS_KEY = "intermediate_steps" + def extract_cypher(text: str) -> str: # The pattern to find Cypher code enclosed in triple backticks @@ -33,6 +35,12 @@ class GraphCypherQAChain(Chain): qa_chain: LLMChain input_key: str = "query" #: :meta private: output_key: str = "result" #: :meta private: + top_k: int = 10 + """Number of results to return from the query""" + return_intermediate_steps: bool = False + """Whether or not to return the intermediate steps along with the final answer.""" + return_direct: bool = False + """Whether or not to return the result of querying the graph directly.""" @property def input_keys(self) -> List[str]: @@ -74,12 +82,14 @@ def _call( self, inputs: Dict[str, Any], run_manager: Optional[CallbackManagerForChainRun] = None, - ) -> Dict[str, str]: + ) -> Dict[str, Any]: """Generate Cypher statement, use it to look up in db and answer question.""" _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager() callbacks = _run_manager.get_child() question = inputs[self.input_key] + intermediate_steps: List = [] + generated_cypher = self.cypher_generation_chain.run( {"question": question, "schema": self.graph.get_schema}, callbacks=callbacks ) @@ -91,14 +101,30 @@ def _call( _run_manager.on_text( generated_cypher, color="green", end="\n", verbose=self.verbose ) - context = self.graph.query(generated_cypher) - _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose) - _run_manager.on_text( - str(context), color="green", end="\n", verbose=self.verbose - ) - result = self.qa_chain( - {"question": question, "context": context}, - callbacks=callbacks, - ) - return {self.output_key: result[self.qa_chain.output_key]} + intermediate_steps.append({"query": generated_cypher}) + + # Retrieve and limit the number of results + context = self.graph.query(generated_cypher)[: self.top_k] + + if self.return_direct: + final_result = context + else: + _run_manager.on_text("Full Context:", end="\n", verbose=self.verbose) + _run_manager.on_text( + str(context), color="green", end="\n", verbose=self.verbose + ) + + intermediate_steps.append({"context": context}) + + result = self.qa_chain( + {"question": question, "context": context}, + callbacks=callbacks, + ) + final_result = result[self.qa_chain.output_key] + + chain_result: Dict[str, Any] = {self.output_key: final_result} + if self.return_intermediate_steps: + chain_result[INTERMEDIATE_STEPS_KEY] = 
intermediate_steps + + return chain_result diff --git a/langchain/graphs/neo4j_graph.py b/langchain/graphs/neo4j_graph.py index 8942b2b2b19d1..e56d125c9c4a2 100644 --- a/langchain/graphs/neo4j_graph.py +++ b/langchain/graphs/neo4j_graph.py @@ -78,8 +78,7 @@ def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]: with self._driver.session(database=self._database) as session: try: data = session.run(query, params) - # Hard limit of 50 results - return [r.data() for r in data][:50] + return [r.data() for r in data] except CypherSyntaxError as e: raise ValueError("Generated Cypher Statement is not valid\n" f"{e}") diff --git a/tests/integration_tests/chains/test_graph_database.py b/tests/integration_tests/chains/test_graph_database.py index 10a00a2d0f66d..9b515f9009eda 100644 --- a/tests/integration_tests/chains/test_graph_database.py +++ b/tests/integration_tests/chains/test_graph_database.py @@ -58,3 +58,113 @@ def test_cypher_generating_run() -> None: output = chain.run("Who played in Pulp Fiction?") expected_output = " Bruce Willis played in Pulp Fiction." assert output == expected_output + + +def test_cypher_top_k() -> None: + """Test top_k parameter correctly limits the number of results in the context.""" + url = os.environ.get("NEO4J_URL") + username = os.environ.get("NEO4J_USERNAME") + password = os.environ.get("NEO4J_PASSWORD") + assert url is not None + assert username is not None + assert password is not None + + TOP_K = 1 + + graph = Neo4jGraph( + url=url, + username=username, + password=password, + ) + # Delete all nodes in the graph + graph.query("MATCH (n) DETACH DELETE n") + # Create two nodes and a relationship + graph.query( + "CREATE (a:Actor {name:'Bruce Willis'})" + "-[:ACTED_IN]->(:Movie {title: 'Pulp Fiction'})" + "<-[:ACTED_IN]-(:Actor {name:'Foo'})" + ) + # Refresh schema information + graph.refresh_schema() + + chain = GraphCypherQAChain.from_llm( + OpenAI(temperature=0), graph=graph, return_direct=True, top_k=TOP_K + ) + output = chain.run("Who played in Pulp Fiction?") + assert len(output) == TOP_K + + +def test_cypher_intermediate_steps() -> None: + """Test the returning of the intermediate steps.""" + url = os.environ.get("NEO4J_URL") + username = os.environ.get("NEO4J_USERNAME") + password = os.environ.get("NEO4J_PASSWORD") + assert url is not None + assert username is not None + assert password is not None + + graph = Neo4jGraph( + url=url, + username=username, + password=password, + ) + # Delete all nodes in the graph + graph.query("MATCH (n) DETACH DELETE n") + # Create two nodes and a relationship + graph.query( + "CREATE (a:Actor {name:'Bruce Willis'})" + "-[:ACTED_IN]->(:Movie {title: 'Pulp Fiction'})" + ) + # Refresh schema information + graph.refresh_schema() + + chain = GraphCypherQAChain.from_llm( + OpenAI(temperature=0), graph=graph, return_intermediate_steps=True + ) + output = chain("Who played in Pulp Fiction?") + + expected_output = " Bruce Willis played in Pulp Fiction." 
+ assert output["result"] == expected_output + + query = output["intermediate_steps"][0]["query"] + expected_query = ( + "\n\nMATCH (a:Actor)-[:ACTED_IN]->" + "(m:Movie {title: 'Pulp Fiction'}) RETURN a.name" + ) + assert query == expected_query + + context = output["intermediate_steps"][1]["context"] + expected_context = [{"a.name": "Bruce Willis"}] + assert context == expected_context + + +def test_cypher_return_direct() -> None: + """Test that chain returns direct results.""" + url = os.environ.get("NEO4J_URL") + username = os.environ.get("NEO4J_USERNAME") + password = os.environ.get("NEO4J_PASSWORD") + assert url is not None + assert username is not None + assert password is not None + + graph = Neo4jGraph( + url=url, + username=username, + password=password, + ) + # Delete all nodes in the graph + graph.query("MATCH (n) DETACH DELETE n") + # Create two nodes and a relationship + graph.query( + "CREATE (a:Actor {name:'Bruce Willis'})" + "-[:ACTED_IN]->(:Movie {title: 'Pulp Fiction'})" + ) + # Refresh schema information + graph.refresh_schema() + + chain = GraphCypherQAChain.from_llm( + OpenAI(temperature=0), graph=graph, return_direct=True + ) + output = chain.run("Who played in Pulp Fiction?") + expected_output = [{"a.name": "Bruce Willis"}] + assert output == expected_output From 9218684759c54b1155c99a627477491772806620 Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Sat, 10 Jun 2023 15:42:32 -0700 Subject: [PATCH 12/46] Add a new vector store - AwaDB (#5971) (#5992) Added AwaDB vector store, which is a wrapper over the AwaDB, that can be used as a vector storage and has an efficient similarity search. Added integration tests for the vector store Added jupyter notebook with the example Delete a unneeded empty file and resolve the conflict(https://github.com/hwchase17/langchain/pull/5886) Please check, Thanks! @dev2049 @hwchase17 --------- Fixes # (issue) #### Before submitting #### Who can review? Tag maintainers/contributors who might be interested: --------- Co-authored-by: ljeagle Co-authored-by: vincent --- docs/integrations/awadb.md | 21 ++ .../indexes/vectorstores/examples/awadb.ipynb | 194 ++++++++++++ langchain/vectorstores/__init__.py | 2 + langchain/vectorstores/awadb.py | 284 ++++++++++++++++++ poetry.lock | 61 +++- pyproject.toml | 2 + .../vectorstores/test_awadb.py | 55 ++++ 7 files changed, 617 insertions(+), 2 deletions(-) create mode 100644 docs/integrations/awadb.md create mode 100644 docs/modules/indexes/vectorstores/examples/awadb.ipynb create mode 100644 langchain/vectorstores/awadb.py create mode 100644 tests/integration_tests/vectorstores/test_awadb.py diff --git a/docs/integrations/awadb.md b/docs/integrations/awadb.md new file mode 100644 index 0000000000000..fc940bcd53d60 --- /dev/null +++ b/docs/integrations/awadb.md @@ -0,0 +1,21 @@ +# AwaDB + +>[AwaDB](https://github.com/awa-ai/awadb) is an AI Native database for the search and storage of embedding vectors used by LLM Applications. + +## Installation and Setup + +```bash +pip install awadb +``` + + +## VectorStore + +There exists a wrapper around AwaDB vector databases, allowing you to use it as a vectorstore, +whether for semantic search or example selection. 
+ +```python +from langchain.vectorstores import AwaDB +``` + +For a more detailed walkthrough of the AwaDB wrapper, see [this notebook](../modules/indexes/vectorstores/examples/awadb.ipynb) diff --git a/docs/modules/indexes/vectorstores/examples/awadb.ipynb b/docs/modules/indexes/vectorstores/examples/awadb.ipynb new file mode 100644 index 0000000000000..be1b40ee3525e --- /dev/null +++ b/docs/modules/indexes/vectorstores/examples/awadb.ipynb @@ -0,0 +1,194 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "833c4789", + "metadata": {}, + "source": [ + "# AwaDB\n", + "[AwaDB](https://github.com/awa-ai/awadb) is an AI Native database for the search and storage of embedding vectors used by LLM Applications.\n", + "This notebook shows how to use functionality related to AwaDB." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "252930ea", + "metadata": {}, + "outputs": [], + "source": [ + "!pip install awadb" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f2b71a47", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain.vectorstores import AwaDB\n", + "from langchain.document_loaders import TextLoader" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "49be0bac", + "metadata": {}, + "outputs": [], + "source": [ + "loader = TextLoader('../../../state_of_the_union.txt')\n", + "documents = loader.load()\n", + "text_splitter = CharacterTextSplitter(chunk_size= 100, chunk_overlap=0)\n", + "docs = text_splitter.split_documents(documents)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18714278", + "metadata": {}, + "outputs": [], + "source": [ + "db = AwaDB.from_documents(docs)\n", + "query = \"What did the president say about Ketanji Brown Jackson\"\n", + "docs = db.similarity_search(query)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "62b7a4c5", + "metadata": {}, + "outputs": [], + "source": [ + "print(docs[0].page_content)" + ] + }, + { + "cell_type": "markdown", + "id": "a9b4be48", + "metadata": {}, + "source": [ + "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence." + ] + }, + { + "cell_type": "markdown", + "id": "87fec6b5", + "metadata": {}, + "source": [ + "## Similarity search with score" + ] + }, + { + "cell_type": "markdown", + "id": "17231924", + "metadata": {}, + "source": [ + "The returned distance score is between 0 and 1. 0 is dissimilar, 1 is the most similar." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f40ddae1", + "metadata": {}, + "outputs": [], + "source": [ + "docs = db.similarity_search_with_score(query)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f0045583", + "metadata": {}, + "outputs": [], + "source": [ + "print(docs[0])" + ] + }, + { + "cell_type": "markdown", + "id": "8c2da99d", + "metadata": {}, + "source": [ + "(Document(page_content='And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson.
One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../../state_of_the_union.txt'}), 0.561813814013747)" + ] + }, + { + "cell_type": "markdown", + "id": "0b49fb59", + "metadata": {}, + "source": [ + "## Restore the table you created and added data to before" + ] + }, + { + "cell_type": "markdown", + "id": "1bfa6e25", + "metadata": {}, + "source": [ + "AwaDB automatically persists added document data." + ] + }, + { + "cell_type": "markdown", + "id": "2a0f3b35", + "metadata": {}, + "source": [ + "To restore a table you created and added data to before, just do the following:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1fd4b5b0", + "metadata": {}, + "outputs": [], + "source": [ + "import awadb\n", + "\n", + "awadb_client = awadb.Client()\n", + "ret = awadb_client.Load('langchain_awadb')\n", + "if ret: print('awadb load table success')\n", + "else:\n", + " print('awadb load table failed')" + ] + }, + { + "cell_type": "markdown", + "id": "5ae9a9dd", + "metadata": {}, + "source": [ + "Expected output: `awadb load table success`" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/langchain/vectorstores/__init__.py b/langchain/vectorstores/__init__.py index c2c37933767e9..3a932c23825c8 100644 --- a/langchain/vectorstores/__init__.py +++ b/langchain/vectorstores/__init__.py @@ -2,6 +2,7 @@ from langchain.vectorstores.analyticdb import AnalyticDB from langchain.vectorstores.annoy import Annoy from langchain.vectorstores.atlas import AtlasDB +from langchain.vectorstores.awadb import AwaDB from langchain.vectorstores.base import VectorStore from langchain.vectorstores.chroma import Chroma from langchain.vectorstores.clickhouse import Clickhouse, ClickhouseSettings @@ -60,4 +61,5 @@ "ClickhouseSettings", "Tigris", "MatchingEngine", + "AwaDB", ] diff --git a/langchain/vectorstores/awadb.py b/langchain/vectorstores/awadb.py new file mode 100644 index 0000000000000..9c7d8a385bf8e --- /dev/null +++ b/langchain/vectorstores/awadb.py @@ -0,0 +1,284 @@ +"""Wrapper around AwaDB for embedding vectors""" +from __future__ import annotations + +import logging +from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Tuple, Type + +from langchain.docstore.document import Document +from langchain.embeddings.base import Embeddings +from langchain.vectorstores.base import VectorStore + +# from pydantic import BaseModel, Field, root_validator + + +if TYPE_CHECKING: + import awadb + +logger = logging.getLogger() +DEFAULT_TOPN = 4 + + +class AwaDB(VectorStore): + """Interface implemented by AwaDB vector stores.""" + + _DEFAULT_TABLE_NAME = "langchain_awadb" + + def __init__( + self, + table_name: str = _DEFAULT_TABLE_NAME, + embedding_model: Optional[Embeddings] = None, + log_and_data_dir: Optional[str] = None, + client: Optional[awadb.Client] = None, + ) -> None: + """Initialize with AwaDB client.""" + + try: + import awadb + except ImportError: + raise ValueError( + "Could not import awadb python package. " + "Please install it with `pip install awadb`."
+ ) + + if client is not None: + self.awadb_client = client + else: + if log_and_data_dir is not None: + self.awadb_client = awadb.Client(log_and_data_dir) + else: + self.awadb_client = awadb.Client() + + self.awadb_client.Create(table_name) + if embedding_model is not None: + self.embedding_model = embedding_model + + self.added_doc_count = 0 + + def add_texts( + self, + texts: Iterable[str], + metadatas: Optional[List[dict]] = None, + **kwargs: Any, + ) -> List[str]: + """Run more texts through the embeddings and add to the vectorstore. + Args: + texts: Iterable of strings to add to the vectorstore. + metadatas: Optional list of metadatas associated with the texts. + kwargs: vectorstore specific parameters + + Returns: + List of ids from adding the texts into the vectorstore. + """ + if self.awadb_client is None: + raise ValueError("AwaDB client is None!!!") + + embeddings = None + if self.embedding_model is not None: + embeddings = self.embedding_model.embed_documents(list(texts)) + added_results: List[str] = [] + doc_no = 0 + for text in texts: + doc: List[Any] = [] + if embeddings is not None: + doc.append(text) + doc.append(embeddings[doc_no]) + else: + dict_tmp = {} + dict_tmp["embedding_text"] = text + doc.append(dict_tmp) + + if metadatas is not None: + if doc_no < metadatas.__len__(): + doc.append(metadatas[doc_no]) + self.awadb_client.Add(doc) + added_results.append(str(self.added_doc_count)) + + doc_no = doc_no + 1 + self.added_doc_count = self.added_doc_count + 1 + + return added_results + + def load_local( + self, + table_name: str = _DEFAULT_TABLE_NAME, + **kwargs: Any, + ) -> bool: + if self.awadb_client is None: + raise ValueError("AwaDB client is None!!!") + + return self.awadb_client.Load(table_name) + + def similarity_search( + self, + query: str, + k: int = DEFAULT_TOPN, + **kwargs: Any, + ) -> List[Document]: + """Return docs most similar to query.""" + if self.awadb_client is None: + raise ValueError("AwaDB client is None!!!") + + embedding = None + if self.embedding_model is not None: + embedding = self.embedding_model.embed_query(query) + + return self.similarity_search_by_vector(embedding, k) + + def similarity_search_with_score( + self, + query: str, + k: int = DEFAULT_TOPN, + **kwargs: Any, + ) -> List[Tuple[Document, float]]: + """Return docs and relevance scores, normalized on a scale from 0 to 1. + + 0 is dissimilar, 1 is most similar. + """ + + if self.awadb_client is None: + raise ValueError("AwaDB client is None!!!") + + embedding = None + if self.embedding_model is not None: + embedding = self.embedding_model.embed_query(query) + + show_results = self.awadb_client.Search(embedding, k) + + results: List[Tuple[Document, float]] = [] + + if show_results.__len__() == 0: + return results + + scores: List[float] = [] + retrieval_docs = self.similarity_search_by_vector(embedding, k, scores) + + L2_Norm = 0.0 + for score in scores: + L2_Norm = L2_Norm + score * score + + L2_Norm = pow(L2_Norm, 0.5) + doc_no = 0 + for doc in retrieval_docs: + doc_tuple = (doc, 1 - scores[doc_no] / L2_Norm) + results.append(doc_tuple) + doc_no = doc_no + 1 + + return results + + def similarity_search_with_relevance_scores( + self, + query: str, + k: int = DEFAULT_TOPN, + **kwargs: Any, + ) -> List[Tuple[Document, float]]: + """Return docs and relevance scores, normalized on a scale from 0 to 1. + + 0 is dissimilar, 1 is most similar. 
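+ (The score is computed as 1 - distance / ||distances||: each raw L2
+ distance is normalized by the L2 norm of all returned distances.)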
+ """ + + if self.awadb_client is None: + raise ValueError("AwaDB client is None!!!") + + embedding = None + if self.embedding_model is not None: + embedding = self.embedding_model.embed_query(query) + + show_results = self.awadb_client.Search(embedding, k) + + results: List[Tuple[Document, float]] = [] + + if show_results.__len__() == 0: + return results + + scores: List[float] = [] + retrieval_docs = self.similarity_search_by_vector(embedding, k, scores) + + L2_Norm = 0.0 + for score in scores: + L2_Norm = L2_Norm + score * score + + L2_Norm = pow(L2_Norm, 0.5) + doc_no = 0 + for doc in retrieval_docs: + doc_tuple = (doc, 1 - scores[doc_no] / L2_Norm) + results.append(doc_tuple) + doc_no = doc_no + 1 + + return results + + def similarity_search_by_vector( + self, + embedding: List[float], + k: int = DEFAULT_TOPN, + scores: Optional[list] = None, + **kwargs: Any, + ) -> List[Document]: + """Return docs most similar to embedding vector. + + Args: + embedding: Embedding to look up documents similar to. + k: Number of Documents to return. Defaults to 4. + + Returns: + List of Documents most similar to the query vector. + """ + + if self.awadb_client is None: + raise ValueError("AwaDB client is None!!!") + + show_results = self.awadb_client.Search(embedding, k) + + results: List[Document] = [] + + if show_results.__len__() == 0: + return results + + for item_detail in show_results[0]["ResultItems"]: + content = "" + meta_data = {} + for item_key in item_detail: + if item_key == "Field@0": # text for the document + content = item_detail[item_key] + elif item_key == "Field@1": # embedding field for the document + continue + elif item_key == "score": # L2 distance + if scores is not None: + score = item_detail[item_key] + scores.append(score) + else: + meta_data[item_key] = item_detail[item_key] + results.append(Document(page_content=content, metadata=meta_data)) + return results + + @classmethod + def from_texts( + cls: Type[AwaDB], + texts: List[str], + embedding: Optional[Embeddings] = None, + metadatas: Optional[List[dict]] = None, + table_name: str = _DEFAULT_TABLE_NAME, + logging_and_data_dir: Optional[str] = None, + client: Optional[awadb.Client] = None, + **kwargs: Any, + ) -> AwaDB: + """Create an AwaDB vectorstore from a raw documents. + + Args: + texts (List[str]): List of texts to add to the table. + embedding (Optional[Embeddings]): Embedding function. Defaults to None. + metadatas (Optional[List[dict]]): List of metadatas. Defaults to None. + table_name (str): Name of the table to create. + logging_and_data_dir (Optional[str]): Directory of logging and persistence. + client (Optional[awadb.Client]): AwaDB client + + Returns: + AwaDB: AwaDB vectorstore. 
+ """ + awadb_client = cls( + table_name=table_name, + embedding_model=embedding, + log_and_data_dir=logging_and_data_dir, + client=client, + ) + awadb_client.add_texts(texts=texts, metadatas=metadatas) + return awadb_client diff --git a/poetry.lock b/poetry.lock index 96d1c500aa513..03242e43c1d9b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -570,6 +570,26 @@ dev = ["coverage (>=5,<6)", "flake8 (>=3,<4)", "pytest (>=6,<7)", "sphinx-copybu docs = ["sphinx-copybutton (>=0.4,<0.5)", "sphinx-rtd-theme (>=1.0,<2.0)", "sphinx-tabs (>=3,<4)", "sphinxcontrib-mermaid (>=0.7,<0.8)"] test = ["coverage (>=5,<6)", "pytest (>=6,<7)"] +[[package]] +name = "awadb" +version = "0.3.2" +description = "The AI Native database for embedding vectors" +category = "main" +optional = true +python-versions = ">=3.6" +files = [ + {file = "awadb-0.3.2-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:f3ce3b066198782fa413f452c56001c58ebec71a1e1dca0eee68f73321ba15a9"}, + {file = "awadb-0.3.2-cp311-cp311-macosx_10_13_universal2.whl", hash = "sha256:c96b5e263c32b2563b1fa027035bdcf50540808ad303071cc1aed3471c3c39b7"}, + {file = "awadb-0.3.2-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:3e43b5a74753261857d0b146543a4620e00938833181259f138f07457fa84812"}, + {file = "awadb-0.3.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:6330b4d18a814c1562113b3b7897db629c2ac9b5818236ead0fc5f3445b6b7fb"}, + {file = "awadb-0.3.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:82b4e61cc905339868a9f833d0988098f56411b42e0f8dd571aec7c8d6a3f1fa"}, + {file = "awadb-0.3.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:5efaa93d69c467f16ec4f65ed250ec26015781826c0d059c8a54613a5d3e2c3e"}, + {file = "awadb-0.3.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:7be0811550d72f49018e4790d290cf521f92ffa84d65ef1073e621f225d142ec"}, +] + +[package.extras] +test = ["pytest (>=6.0)"] + [[package]] name = "azure-ai-formrecognizer" version = "3.2.1" @@ -6030,14 +6050,51 @@ optional = true python-versions = ">=3.7" files = [ {file = "orjson-3.9.1-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c4434b7b786fdc394b95d029fb99949d7c2b05bbd4bf5cb5e3906be96ffeee3b"}, + {file = "orjson-3.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09faf14f74ed47e773fa56833be118e04aa534956f661eb491522970b7478e3b"}, + {file = "orjson-3.9.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:503eb86a8d53a187fe66aa80c69295a3ca35475804da89a9547e4fce5f803822"}, + {file = "orjson-3.9.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:20f2804b5a1dbd3609c086041bd243519224d47716efd7429db6c03ed28b7cc3"}, + {file = "orjson-3.9.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fd828e0656615a711c4cc4da70f3cac142e66a6703ba876c20156a14e28e3fa"}, + {file = "orjson-3.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec53d648176f873203b9c700a0abacab33ca1ab595066e9d616f98cdc56f4434"}, + {file = "orjson-3.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e186ae76b0d97c505500664193ddf508c13c1e675d9b25f1f4414a7606100da6"}, + {file = "orjson-3.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d4edee78503016f4df30aeede0d999b3cb11fb56f47e9db0e487bce0aaca9285"}, + {file = "orjson-3.9.1-cp310-none-win_amd64.whl", hash = "sha256:a4cc5d21e68af982d9a2528ac61e604f092c60eed27aef3324969c68f182ec7e"}, {file = "orjson-3.9.1-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", 
hash = "sha256:761b6efd33c49de20dd73ce64cc59da62c0dab10aa6015f582680e0663cc792c"}, + {file = "orjson-3.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31229f9d0b8dc2ef7ee7e4393f2e4433a28e16582d4b25afbfccc9d68dc768f8"}, + {file = "orjson-3.9.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0b7ab18d55ecb1de543d452f0a5f8094b52282b916aa4097ac11a4c79f317b86"}, + {file = "orjson-3.9.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db774344c39041f4801c7dfe03483df9203cbd6c84e601a65908e5552228dd25"}, + {file = "orjson-3.9.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ae47ef8c0fe89c4677db7e9e1fb2093ca6e66c3acbee5442d84d74e727edad5e"}, + {file = "orjson-3.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:103952c21575b9805803c98add2eaecd005580a1e746292ed2ec0d76dd3b9746"}, + {file = "orjson-3.9.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2cb0121e6f2c9da3eddf049b99b95fef0adf8480ea7cb544ce858706cdf916eb"}, + {file = "orjson-3.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:24d4ddaa2876e657c0fd32902b5c451fd2afc35159d66a58da7837357044b8c2"}, {file = "orjson-3.9.1-cp311-none-win_amd64.whl", hash = "sha256:0b53b5f72cf536dd8aa4fc4c95e7e09a7adb119f8ff8ee6cc60f735d7740ad6a"}, {file = "orjson-3.9.1-cp37-cp37m-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:d4b68d01a506242316a07f1d2f29fb0a8b36cee30a7c35076f1ef59dce0890c1"}, + {file = "orjson-3.9.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d9dd4abe6c6fd352f00f4246d85228f6a9847d0cc14f4d54ee553718c225388f"}, + {file = "orjson-3.9.1-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9e20bca5e13041e31ceba7a09bf142e6d63c8a7467f5a9c974f8c13377c75af2"}, + {file = "orjson-3.9.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d8ae0467d01eb1e4bcffef4486d964bfd1c2e608103e75f7074ed34be5df48cc"}, + {file = "orjson-3.9.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:06f6ab4697fab090517f295915318763a97a12ee8186054adf21c1e6f6abbd3d"}, + {file = "orjson-3.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8515867713301fa065c58ec4c9053ba1a22c35113ab4acad555317b8fd802e50"}, + {file = "orjson-3.9.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:393d0697d1dfa18d27d193e980c04fdfb672c87f7765b87952f550521e21b627"}, + {file = "orjson-3.9.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d96747662d3666f79119e5d28c124e7d356c7dc195cd4b09faea4031c9079dc9"}, {file = "orjson-3.9.1-cp37-none-win_amd64.whl", hash = "sha256:6d173d3921dd58a068c88ec22baea7dbc87a137411501618b1292a9d6252318e"}, {file = "orjson-3.9.1-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:d1c2b0b4246c992ce2529fc610a446b945f1429445ece1c1f826a234c829a918"}, + {file = "orjson-3.9.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19f70ba1f441e1c4bb1a581f0baa092e8b3e3ce5b2aac2e1e090f0ac097966da"}, + {file = "orjson-3.9.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:375d65f002e686212aac42680aed044872c45ee4bc656cf63d4a215137a6124a"}, + {file = "orjson-3.9.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4751cee4a7b1daeacb90a7f5adf2170ccab893c3ab7c5cea58b45a13f89b30b3"}, + {file = 
"orjson-3.9.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78d9a2a4b2302d5ebc3695498ebc305c3568e5ad4f3501eb30a6405a32d8af22"}, + {file = "orjson-3.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46b4facc32643b2689dfc292c0c463985dac4b6ab504799cf51fc3c6959ed668"}, + {file = "orjson-3.9.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ec7c8a0f1bf35da0d5fd14f8956f3b82a9a6918a3c6963d718dfd414d6d3b604"}, + {file = "orjson-3.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d3a40b0fbe06ccd4d6a99e523d20b47985655bcada8d1eba485b1b32a43e4904"}, {file = "orjson-3.9.1-cp38-none-win_amd64.whl", hash = "sha256:402f9d3edfec4560a98880224ec10eba4c5f7b4791e4bc0d4f4d8df5faf2a006"}, {file = "orjson-3.9.1-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:49c0d78dcd34626e2e934f1192d7c052b94e0ecadc5f386fd2bda6d2e03dadf5"}, + {file = "orjson-3.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:125f63e56d38393daa0a1a6dc6fedefca16c538614b66ea5997c3bd3af35ef26"}, + {file = "orjson-3.9.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:08927970365d2e1f3ce4894f9ff928a7b865d53f26768f1bbdd85dd4fee3e966"}, + {file = "orjson-3.9.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f9a744e212d4780ecd67f4b6b128b2e727bee1df03e7059cddb2dfe1083e7dc4"}, + {file = "orjson-3.9.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d1dbf36db7240c61eec98c8d21545d671bce70be0730deb2c0d772e06b71af3"}, + {file = "orjson-3.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80a1e384626f76b66df615f7bb622a79a25c166d08c5d2151ffd41f24c4cc104"}, + {file = "orjson-3.9.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:15d28872fb055bf17ffca913826e618af61b2f689d2b170f72ecae1a86f80d52"}, + {file = "orjson-3.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1e4d905338f9ef32c67566929dfbfbb23cc80287af8a2c38930fb0eda3d40b76"}, {file = "orjson-3.9.1-cp39-none-win_amd64.whl", hash = "sha256:48a27da6c7306965846565cc385611d03382bbd84120008653aa2f6741e2105d"}, + {file = "orjson-3.9.1.tar.gz", hash = "sha256:db373a25ec4a4fccf8186f9a72a1b3442837e40807a736a815ab42481e83b7d0"}, ] [[package]] @@ -11366,7 +11423,7 @@ cffi = {version = ">=1.11", markers = "platform_python_implementation == \"PyPy\ cffi = ["cffi (>=1.11)"] [extras] -all = ["anthropic", "cohere", "openai", "nlpcloud", "huggingface_hub", "jina", "manifest-ml", "elasticsearch", "opensearch-py", "google-search-results", "faiss-cpu", "sentence-transformers", "transformers", "spacy", "nltk", "wikipedia", "beautifulsoup4", "tiktoken", "torch", "jinja2", "pinecone-client", "pinecone-text", "pymongo", "weaviate-client", "redis", "google-api-python-client", "google-auth", "wolframalpha", "qdrant-client", "tensorflow-text", "pypdf", "networkx", "nomic", "aleph-alpha-client", "deeplake", "pgvector", "psycopg2-binary", "pyowm", "pytesseract", "html2text", "atlassian-python-api", "gptcache", "duckduckgo-search", "arxiv", "azure-identity", "clickhouse-connect", "azure-cosmos", "lancedb", "langkit", "lark", "pexpect", "pyvespa", "O365", "jq", "docarray", "steamship", "pdfminer-six", "lxml", "requests-toolbelt", "neo4j", "openlm", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "momento", "singlestoredb", "tigrisdb", "nebula3-python"] +all = ["anthropic", "cohere", "openai", "nlpcloud", "huggingface_hub", "jina", "manifest-ml", 
"elasticsearch", "opensearch-py", "google-search-results", "faiss-cpu", "sentence-transformers", "transformers", "spacy", "nltk", "wikipedia", "beautifulsoup4", "tiktoken", "torch", "jinja2", "pinecone-client", "pinecone-text", "pymongo", "weaviate-client", "redis", "google-api-python-client", "google-auth", "wolframalpha", "qdrant-client", "tensorflow-text", "pypdf", "networkx", "nomic", "aleph-alpha-client", "deeplake", "pgvector", "psycopg2-binary", "pyowm", "pytesseract", "html2text", "atlassian-python-api", "gptcache", "duckduckgo-search", "arxiv", "azure-identity", "clickhouse-connect", "azure-cosmos", "lancedb", "langkit", "lark", "pexpect", "pyvespa", "O365", "jq", "docarray", "steamship", "pdfminer-six", "lxml", "requests-toolbelt", "neo4j", "openlm", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "momento", "singlestoredb", "tigrisdb", "nebula3-python", "awadb"] azure = ["azure-identity", "azure-cosmos", "openai", "azure-core", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech"] cohere = ["cohere"] docarray = ["docarray"] @@ -11380,4 +11437,4 @@ text-helpers = ["chardet"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "dbbaa2907bf2ac09ed111ce712772bba0fe56901627f41c53aef71ae5a38d1c6" +content-hash = "ecf7086e83cc0ff19e6851c0b63170b082b267c1c1c00f47700fd3a8c8bb46c5" diff --git a/pyproject.toml b/pyproject.toml index f0f68ae3a4e7f..40fc00be451ed 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -106,6 +106,7 @@ pyspark = {version = "^3.4.0", optional = true} tigrisdb = {version = "^1.0.0b6", optional = true} nebula3-python = {version = "^3.4.0", optional = true} langchainplus-sdk = ">=0.0.7" +awadb = {version = "^0.3.2", optional = true} [tool.poetry.group.docs.dependencies] @@ -286,6 +287,7 @@ all = [ "singlestoredb", "tigrisdb", "nebula3-python", + "awadb", ] # An extra used to be able to add extended testing. 
diff --git a/tests/integration_tests/vectorstores/test_awadb.py b/tests/integration_tests/vectorstores/test_awadb.py
new file mode 100644
index 0000000000000..b643f682de73a
--- /dev/null
+++ b/tests/integration_tests/vectorstores/test_awadb.py
@@ -0,0 +1,55 @@
+"""Test AwaDB functionality."""
+from langchain.docstore.document import Document
+from langchain.vectorstores import AwaDB
+from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
+
+
+def test_awadb() -> None:
+    """Test end-to-end construction and search."""
+    texts = ["foo", "bar", "baz"]
+    docsearch = AwaDB.from_texts(
+        table_name="test_awadb", texts=texts, embedding=FakeEmbeddings()
+    )
+    output = docsearch.similarity_search("foo", k=1)
+    assert output == [Document(page_content="foo")]
+
+
+def test_awadb_with_metadatas() -> None:
+    """Test end-to-end construction and search with metadata."""
+    texts = ["foo", "bar", "baz"]
+    metadatas = [{"page": i} for i in range(len(texts))]
+    docsearch = AwaDB.from_texts(
+        table_name="test_awadb",
+        texts=texts,
+        embedding=FakeEmbeddings(),
+        metadatas=metadatas,
+    )
+    output = docsearch.similarity_search("foo", k=1)
+    assert output == [Document(page_content="foo", metadata={"page": 0})]
+
+
+def test_awadb_with_metadatas_with_scores() -> None:
+    """Test end-to-end construction and scored search."""
+    texts = ["foo", "bar", "baz"]
+    metadatas = [{"page": str(i)} for i in range(len(texts))]
+    docsearch = AwaDB.from_texts(
+        table_name="test_awadb",
+        texts=texts,
+        embedding=FakeEmbeddings(),
+        metadatas=metadatas,
+    )
+    output = docsearch.similarity_search_with_score("foo", k=1)
+    assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)]
+
+
+def test_awadb_add_texts() -> None:
+    """Test end-to-end adding of texts."""
+    # Create the initial doc store.
+    texts = ["foo", "bar", "baz"]
+    docsearch = AwaDB.from_texts(
+        table_name="test_awadb", texts=texts, embedding=FakeEmbeddings()
+    )
+    # Add a document identical to one already in the store.
+    docsearch.add_texts(["foo"])
+    output = docsearch.similarity_search("foo", k=2)
+    assert output == [Document(page_content="foo"), Document(page_content="foo")]
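
Before moving on to the next patch, a minimal usage sketch of the new integration. Assumptions are flagged inline: the sample texts are made up, and `OpenAIEmbeddings` is just one embedding backend you might plug in; it is not required by this patch. Note that the scores returned by `similarity_search_with_score` are the wrapper's normalized relevance values, computed from AwaDB's raw L2 distances as shown in the comment below.

```python
from langchain.embeddings.openai import OpenAIEmbeddings  # assumed backend; any Embeddings works
from langchain.vectorstores import AwaDB

# Build a table from raw texts; AwaDB persists the data automatically,
# so the table can later be re-opened with load_local("langchain_awadb").
texts = ["foo", "bar", "baz"]
db = AwaDB.from_texts(
    table_name="langchain_awadb",
    texts=texts,
    embedding=OpenAIEmbeddings(),
    metadatas=[{"page": i} for i in range(len(texts))],
)

# Scores are normalized relevance values:
# 1 - L2_distance / L2_norm_of_all_returned_distances.
for doc, score in db.similarity_search_with_score("foo", k=2):
    print(round(score, 3), doc.page_content, doc.metadata)
```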

From 21bd16bb59769eed908cbcb25d9f3e1f0d68de65 Mon Sep 17 00:00:00 2001
From: Lance Martin <122662504+rlancemartin@users.noreply.github.com>
Date: Sat, 10 Jun 2023 15:43:18 -0700
Subject: [PATCH 13/46] Create Airtable loader (#5958)

Create a document loader for Airtable.
---
 docs/modules/indexes/document_loaders.rst     |   1 +
 .../document_loaders/examples/airtable.ipynb  | 142 ++++++++++++++++++
 langchain/document_loaders/__init__.py        |   2 +
 langchain/document_loaders/airtable.py        |  36 +++++
 4 files changed, 181 insertions(+)
 create mode 100644 docs/modules/indexes/document_loaders/examples/airtable.ipynb
 create mode 100644 langchain/document_loaders/airtable.py

diff --git a/docs/modules/indexes/document_loaders.rst b/docs/modules/indexes/document_loaders.rst
index 190abbef2442e..df46be786a59d 100644
--- a/docs/modules/indexes/document_loaders.rst
+++ b/docs/modules/indexes/document_loaders.rst
@@ -30,6 +30,7 @@ For detailed instructions on how to get set up with Unstructured, see installati
    :maxdepth: 1
    :glob:
 
+   ./document_loaders/examples/airtable.ipynb
    ./document_loaders/examples/audio.ipynb
    ./document_loaders/examples/conll-u.ipynb
    ./document_loaders/examples/copypaste.ipynb
diff --git a/docs/modules/indexes/document_loaders/examples/airtable.ipynb b/docs/modules/indexes/document_loaders/examples/airtable.ipynb
new file mode 100644
index 0000000000000..decabe8ed59a5
--- /dev/null
+++ b/docs/modules/indexes/document_loaders/examples/airtable.ipynb
@@ -0,0 +1,142 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "7ae421e6",
+   "metadata": {},
+   "source": [
+    "# Airtable"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "98aea00d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "! pip install pyairtable"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "id": "592483eb",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.document_loaders import AirtableLoader"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "637e1205",
+   "metadata": {},
+   "source": [
+    "* Get your API key [here](https://support.airtable.com/docs/creating-and-using-api-keys-and-access-tokens).\n",
+    "* Get the ID of your base [here](https://airtable.com/developers/web/api/introduction).\n",
+    "* Get your table ID from the table URL as shown [here](https://www.highviewapps.com/kb/where-can-i-find-the-airtable-base-id-and-table-id/#:~:text=Both%20the%20Airtable%20Base%20ID,URL%20that%20begins%20with%20tbl)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "c12a7aff",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "api_key = \"xxx\"\n",
+    "base_id = \"xxx\"\n",
+    "table_id = \"xxx\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "id": "ccddd5a6",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "loader = AirtableLoader(api_key, table_id, base_id)\n",
+    "docs = loader.load()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ae76c25c",
+   "metadata": {},
+   "source": [
+    "Each row of the table is returned as a `dict`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "id": "7abec7ce",
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "3"
+      ]
+     },
+     "execution_count": 10,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "len(docs)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "id": "403c95da",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "{'id': 'recF3GbGZCuh9sXIQ',\n",
+       " 'createdTime': '2023-06-09T04:47:21.000Z',\n",
+       " 'fields': {'Priority': 'High',\n",
+       "  'Status': 'In progress',\n",
+       "  'Name': 'Document Splitters'}}"
+      ]
+     },
+     "execution_count": 11,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "eval(docs[0].page_content)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.16"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
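
Besides `load()`, the loader implementation below also exposes `lazy_load`, which yields one `Document` per row without materializing the whole table. A short sketch, using the same placeholder IDs as the notebook above:

```python
from langchain.document_loaders import AirtableLoader

# Placeholder credentials, as in the notebook above:
# api_token, table_id, base_id.
loader = AirtableLoader("xxx", "xxx", "xxx")

# Stream rows one at a time instead of loading them all at once.
for doc in loader.lazy_load():
    print(doc.metadata["base_id"], doc.page_content[:60])
```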
diff --git a/langchain/document_loaders/__init__.py b/langchain/document_loaders/__init__.py
index 393062b7a5db0..d533159af0441 100644
--- a/langchain/document_loaders/__init__.py
+++ b/langchain/document_loaders/__init__.py
@@ -1,6 +1,7 @@
 """All different types of document loaders."""
 
 from langchain.document_loaders.airbyte_json import AirbyteJSONLoader
+from langchain.document_loaders.airtable import AirtableLoader
 from langchain.document_loaders.apify_dataset import ApifyDatasetLoader
 from langchain.document_loaders.arxiv import ArxivLoader
 from langchain.document_loaders.azlyrics import AZLyricsLoader
@@ -135,6 +136,7 @@
 __all__ = [
     "AZLyricsLoader",
     "AirbyteJSONLoader",
+    "AirtableLoader",
     "ApifyDatasetLoader",
     "ArxivLoader",
     "AzureBlobStorageContainerLoader",
diff --git a/langchain/document_loaders/airtable.py b/langchain/document_loaders/airtable.py
new file mode 100644
index 0000000000000..3dfaf40c4fc93
--- /dev/null
+++ b/langchain/document_loaders/airtable.py
@@ -0,0 +1,36 @@
+from typing import Iterator, List
+
+from langchain.docstore.document import Document
+from langchain.document_loaders.base import BaseLoader
+
+
+class AirtableLoader(BaseLoader):
+    """Loader that loads rows from an Airtable table."""
+
+    def __init__(self, api_token: str, table_id: str, base_id: str):
+        """Initialize with API token and the IDs for table and base."""
+        self.api_token = api_token
+        self.table_id = table_id
+        self.base_id = base_id
+
+    def lazy_load(self) -> Iterator[Document]:
+        """Lazily load records from the table."""
+
+        from pyairtable import Table
+
+        table = Table(self.api_token, self.base_id, self.table_id)
+        records = table.all()
+        for record in records:
+            # Need to convert record from dict to str
+            yield Document(
+                page_content=str(record),
+                metadata={
+                    "source": self.base_id + "_" + self.table_id,
+                    "base_id": self.base_id,
+                    "table_id": self.table_id,
+                },
+            )
+
+    def load(self) -> List[Document]:
+        """Load all records from the table."""
+        return list(self.lazy_load())

From e4224a396b07b29dab1c03ffa46efcc58ffa68e5 Mon Sep 17 00:00:00 2001
From: qued <64741807+qued@users.noreply.github.com>
Date: Sat, 10 Jun 2023 18:24:42 -0500
Subject: [PATCH 14/46] feat: Add `UnstructuredXMLLoader` for `.xml` files
 (#5955)

# Unstructured XML Loader

Adds an `UnstructuredXMLLoader` class for `.xml` files. Works with unstructured>=0.6.7.
A plain-text representation of the text within the XML tags will be available under the `page_content` attribute in the doc.

### Testing

```python
from langchain.document_loaders import UnstructuredXMLLoader

loader = UnstructuredXMLLoader(
    "example_data/factbook.xml",
)
docs = loader.load()
```

## Who can review?

@hwchase17
@eyurtsev
---
 .../examples/example_data/factbook.xml        | 27 +++++++
 .../document_loaders/examples/xml.ipynb       | 78 +++++++++++++++++++
 langchain/document_loaders/__init__.py        |  2 +
 langchain/document_loaders/xml.py             | 22 ++++++
 .../document_loaders/test_xml.py              | 15 ++++
 tests/integration_tests/examples/factbook.xml | 27 +++++++
 6 files changed, 171 insertions(+)
 create mode 100644 docs/modules/indexes/document_loaders/examples/example_data/factbook.xml
 create mode 100644 docs/modules/indexes/document_loaders/examples/xml.ipynb
 create mode 100644 langchain/document_loaders/xml.py
 create mode 100644 tests/integration_tests/document_loaders/test_xml.py
 create mode 100644 tests/integration_tests/examples/factbook.xml

diff --git a/docs/modules/indexes/document_loaders/examples/example_data/factbook.xml b/docs/modules/indexes/document_loaders/examples/example_data/factbook.xml
new file mode 100644
index 0000000000000..d059ee9d0c595
--- /dev/null
+++ b/docs/modules/indexes/document_loaders/examples/example_data/factbook.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="utf-8"?>
+<factbook>
+  <country>
+    <name>United States</name>
+    <capital>Washington, DC</capital>
+    <leader>Joe Biden</leader>
+    <sport>Baseball</sport>
+  </country>
+  <country>
+    <name>Canada</name>
+    <capital>Ottawa</capital>
+    <leader>Justin Trudeau</leader>
+    <sport>Hockey</sport>
+  </country>
+  <country>
+    <name>France</name>
+    <capital>Paris</capital>
+    <leader>Emmanuel Macron</leader>
+    <sport>Soccer</sport>
+  </country>
+  <country>
+    <name>Trinidad &amp; Tobado</name>
+    <capital>Port of Spain</capital>
+    <leader>Keith Rowley</leader>
+    <sport>Track &amp; Field</sport>
+  </country>
+</factbook>
diff --git a/docs/modules/indexes/document_loaders/examples/xml.ipynb b/docs/modules/indexes/document_loaders/examples/xml.ipynb
new file mode 100644
index 0000000000000..5c95986800190
--- /dev/null
+++ b/docs/modules/indexes/document_loaders/examples/xml.ipynb
@@ -0,0 +1,78 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "22a849cc",
+   "metadata": {},
+   "source": [
+    "# XML\n",
+    "\n",
+    "The `UnstructuredXMLLoader` is used to load `.xml` files. The page content will be the text extracted from within the XML tags."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "e6616e3a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.document_loaders import UnstructuredXMLLoader"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "a654e4d9",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "Document(page_content='United States\\n\\nWashington, DC\\n\\nJoe Biden\\n\\nBaseball\\n\\nCanada\\n\\nOttawa\\n\\nJustin Trudeau\\n\\nHockey\\n\\nFrance\\n\\nParis\\n\\nEmmanuel Macron\\n\\nSoccer\\n\\nTrinidad & Tobado\\n\\nPort of Spain\\n\\nKeith Rowley\\n\\nTrack & Field', metadata={'source': 'example_data/factbook.xml'})"
+      ]
+     },
+     "execution_count": 2,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "loader = UnstructuredXMLLoader(\n",
+    "    \"example_data/factbook.xml\",\n",
+    ")\n",
+    "docs = loader.load()\n",
+    "docs[0]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a54342bb",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.15"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/langchain/document_loaders/__init__.py b/langchain/document_loaders/__init__.py
index d533159af0441..87d2335e2344c 100644
--- a/langchain/document_loaders/__init__.py
+++ b/langchain/document_loaders/__init__.py
@@ -121,6 +121,7 @@
     Docx2txtLoader,
     UnstructuredWordDocumentLoader,
 )
+from langchain.document_loaders.xml import UnstructuredXMLLoader
 from langchain.document_loaders.youtube import (
     GoogleApiClient,
     GoogleApiYoutubeLoader,
@@ -242,6 +243,7 @@
     "UnstructuredRTFLoader",
     "UnstructuredURLLoader",
     "UnstructuredWordDocumentLoader",
+    "UnstructuredXMLLoader",
     "WeatherDataLoader",
     "WebBaseLoader",
     "WhatsAppChatLoader",
diff --git a/langchain/document_loaders/xml.py b/langchain/document_loaders/xml.py
new file mode 100644
index 0000000000000..78156ee205ef4
--- /dev/null
+++ b/langchain/document_loaders/xml.py
@@ -0,0 +1,22 @@
+"""Loader that loads XML files."""
+from typing import Any, List
+
+from langchain.document_loaders.unstructured import (
+    UnstructuredFileLoader,
+    validate_unstructured_version,
+)
+
+
+class UnstructuredXMLLoader(UnstructuredFileLoader):
+    """Loader that uses unstructured to load XML files."""
+
+    def __init__(
+        self, file_path: str, mode: str = "single", **unstructured_kwargs: Any
+    ):
+        validate_unstructured_version(min_unstructured_version="0.6.7")
+        super().__init__(file_path=file_path, mode=mode, **unstructured_kwargs)
+
+    def _get_elements(self) -> List:
+        from unstructured.partition.xml import partition_xml
+
+        return partition_xml(filename=self.file_path, **self.unstructured_kwargs)
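
Because `UnstructuredXMLLoader` inherits from `UnstructuredFileLoader`, the base class's `mode` argument applies here as well. A short sketch, assuming the `factbook.xml` fixture from this patch; the `mode="elements"` behavior comes from the base class, not from code added here:

```python
from langchain.document_loaders import UnstructuredXMLLoader

# "single" (the default) returns one Document per file, while
# "elements" keeps each parsed element as its own Document.
loader = UnstructuredXMLLoader("example_data/factbook.xml", mode="elements")
docs = loader.load()
for doc in docs[:3]:
    print(doc.page_content)
```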
diff --git a/tests/integration_tests/document_loaders/test_xml.py b/tests/integration_tests/document_loaders/test_xml.py
new file mode 100644
index 0000000000000..a4ea69e728d50
--- /dev/null
+++ b/tests/integration_tests/document_loaders/test_xml.py
@@ -0,0 +1,15 @@
+import os
+from pathlib import Path
+
+from langchain.document_loaders import UnstructuredXMLLoader
+
+EXAMPLE_DIRECTORY = Path(__file__).parent.parent / "examples"
+
+
+def test_unstructured_xml_loader() -> None:
+    """Test unstructured loader."""
+    file_path = os.path.join(EXAMPLE_DIRECTORY, "factbook.xml")
+    loader = UnstructuredXMLLoader(str(file_path))
+    docs = loader.load()
+
+    assert len(docs) == 1
diff --git a/tests/integration_tests/examples/factbook.xml b/tests/integration_tests/examples/factbook.xml
new file mode 100644
index 0000000000000..d059ee9d0c595
--- /dev/null
+++ b/tests/integration_tests/examples/factbook.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="utf-8"?>
+<factbook>
+  <country>
+    <name>United States</name>
+    <capital>Washington, DC</capital>
+    <leader>Joe Biden</leader>
+    <sport>Baseball</sport>
+  </country>
+  <country>
+    <name>Canada</name>
+    <capital>Ottawa</capital>
+    <leader>Justin Trudeau</leader>
+    <sport>Hockey</sport>
+  </country>
+  <country>
+    <name>France</name>
+    <capital>Paris</capital>
+    <leader>Emmanuel Macron</leader>
+    <sport>Soccer</sport>
+  </country>
+  <country>
+    <name>Trinidad &amp; Tobado</name>
+    <capital>Port of Spain</capital>
+    <leader>Keith Rowley</leader>
+    <sport>Track &amp; Field</sport>
+  </country>
+</factbook>

From f8cf09a2300621185dde10704a84b553d93afe30 Mon Sep 17 00:00:00 2001
From: Ofer Mendelevitch
Date: Sat, 10 Jun 2023 16:27:01 -0700
Subject: [PATCH 15/46] Update to Vectara integration (#5950)

This PR updates the Vectara integration (@hwchase17):
* Adds reuse of requests.Session to improve efficiency and speed.
* Utilizes Vectara's low-level API (instead of the standard API) to better match the user's specific chunking with LangChain.
* Now add_texts puts all the texts into a single Vectara document so indexing is much faster.
* Updated variable names from alpha to lambda_val (to be consistent with Vectara docs) and added n_sentence_context so it's available to use if needed.
* Updates to documentation and tests.

---------

Co-authored-by: Harrison Chase
---
 docs/integrations/vectara.md                 |  24 +++-
 docs/integrations/vectara/vectara_chat.ipynb |  52 ++++----
 .../vectara/vectara_text_generation.ipynb    |   5 +-
 .../vectorstores/examples/vectara.ipynb      |  44 ++++---
 .../vectorstores/examples/weaviate.ipynb     |   7 +-
 langchain/vectorstores/vectara.py            | 121 +++++++++++------
 .../vectorstores/test_vectara.py             |   4 +-
 7 files changed, 161 insertions(+), 96 deletions(-)

diff --git a/docs/integrations/vectara.md b/docs/integrations/vectara.md
index 4dde4faab9163..6c601264e1ed9 100644
--- a/docs/integrations/vectara.md
+++ b/docs/integrations/vectara.md
@@ -4,7 +4,7 @@ What is Vectara?
 
 **Vectara Overview:**
-- Vectara is developer-first API platform for building conversational search applications
+- Vectara is a developer-first API platform for building GenAI applications
 - To use Vectara - first [sign up](https://console.vectara.com/signup) and create an account. Then create a corpus and an API key for indexing and searching.
 - You can use Vectara's [indexing API](https://docs.vectara.com/docs/indexing-apis/indexing) to add documents into Vectara's index
 - You can use Vectara's [Search API](https://docs.vectara.com/docs/search-apis/search) to query Vectara's index (which also supports Hybrid search implicitly).
@@ -13,6 +13,13 @@ What is Vectara?
 ## Installation and Setup
 To use Vectara with LangChain no special installation steps are required. You just have to provide your customer_id, corpus ID, and an API key created within the Vectara console to enable indexing and searching.
+Alternatively, these can be provided as environment variables:
+- export `VECTARA_CUSTOMER_ID`="your_customer_id"
+- export `VECTARA_CORPUS_ID`="your_corpus_id"
+- export `VECTARA_API_KEY`="your-vectara-api-key"
+
+## Usage
+
 ### VectorStore
 There exists a wrapper around the Vectara platform, allowing you to use it as a vectorstore, whether for semantic search or example selection.
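
The hunk that follows documents the new query options; as a quick illustration, here is a sketch of the renamed parameters in action. The credentials are placeholders, and the constructor arguments can also be supplied via the `VECTARA_*` environment variables described above:

```python
from langchain.vectorstores import Vectara

vectara = Vectara(
    vectara_customer_id="your_customer_id",
    vectara_corpus_id="your_corpus_id",
    vectara_api_key="your-vectara-api-key",
)

# lambda_val mixes lexical matching into the vector search, and
# n_sentence_context widens each returned segment by two sentences.
docs_and_scores = vectara.similarity_search_with_score(
    "What did the president say about Ketanji Brown Jackson?",
    k=5,
    lambda_val=0.025,
    n_sentence_context=2,
)
for doc, score in docs_and_scores:
    print(round(score, 3), doc.page_content[:80])
```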
@@ -32,8 +39,21 @@ vectara = Vectara(
 ```
 The customer_id, corpus_id and api_key are optional, and if they are not supplied will be read from the environment variables `VECTARA_CUSTOMER_ID`, `VECTARA_CORPUS_ID` and `VECTARA_API_KEY`, respectively.
 
+To query the vectorstore, you can use the `similarity_search` method (or `similarity_search_with_score`), which takes a query string and returns a list of results:
+```python
+results = vectara.similarity_search("what is LangChain?")
+```
+
+`similarity_search_with_score` also supports the following additional arguments:
+- `k`: number of results to return (defaults to 5)
+- `lambda_val`: the [lexical matching](https://docs.vectara.com/docs/api-reference/search-apis/lexical-matching) factor for hybrid search (defaults to 0.025)
+- `filter`: a [filter](https://docs.vectara.com/docs/common-use-cases/filtering-by-metadata/filter-overview) to apply to the results (default None)
+- `n_sentence_context`: number of sentences to include before/after the actual matching segment when returning results. This defaults to 0 so as to return the exact text segment that matches, but can be used with other values e.g. 2 or 3 to return adjacent text segments.
+
+The results are returned as a list of relevant documents, along with a relevance score for each document.
+
-For a more detailed walkthrough of the Vectara wrapper, see one of the two example notebooks:
+For more detailed examples of using the Vectara wrapper, see one of these two sample notebooks:
 * [Chat Over Documents with Vectara](./vectara/vectara_chat.html)
 * [Vectara Text Generation](./vectara/vectara_text_generation.html)
diff --git a/docs/integrations/vectara/vectara_chat.ipynb b/docs/integrations/vectara/vectara_chat.ipynb
index 9a3318fc7cab8..1af862b3624e3 100644
--- a/docs/integrations/vectara/vectara_chat.ipynb
+++ b/docs/integrations/vectara/vectara_chat.ipynb
@@ -102,21 +102,11 @@
    "metadata": {
     "tags": []
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "<class 'langchain.vectorstores.vectara.Vectara'>\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "openai_api_key = os.environ['OPENAI_API_KEY']\n",
     "llm = OpenAI(openai_api_key=openai_api_key, temperature=0)\n",
-    "retriever = VectaraRetriever(vectorstore, alpha=0.025, k=5, filter=None)\n",
-    "\n",
-    "print(type(vectorstore))\n",
+    "retriever = vectorstore.as_retriever(lambda_val=0.025, k=5, filter=None)\n",
     "d = retriever.get_relevant_documents('What did the president say about Ketanji Brown Jackson')\n",
     "\n",
     "qa = ConversationalRetrievalChain.from_llm(llm, retriever, memory=memory)"
@@ -142,7 +132,7 @@
    {
     "data": {
      "text/plain": [
-      "\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds, a former top litigator in private practice, and a former federal public defender.\""
+      "\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\""
      ]
     },
     "execution_count": 7,
@@ -174,7 +164,7 @@
    {
     "data": {
      "text/plain": [
-      "' Justice Stephen Breyer.'"
+      "' Justice Stephen Breyer'"
      ]
     },
     "execution_count": 9,
@@ -241,7 +231,7 @@
    {
     "data": {
      "text/plain": [
-      "\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds, a former top litigator in private practice, and a former federal public defender.\""
+      "\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\""
      ]
    },
     "execution_count": 12,
@@ -286,7 +276,7 @@
    {
     "data": {
"text/plain": [ - "' Justice Stephen Breyer.'" + "' Justice Stephen Breyer'" ] }, "execution_count": 14, @@ -344,7 +334,7 @@ { "data": { "text/plain": [ - "Document(page_content='Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. A former top litigator in private practice. A former federal public defender.', metadata={'source': '../../modules/state_of_the_union.txt'})" + "Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../../state_of_the_union.txt'})" ] }, "execution_count": 17, @@ -392,6 +382,24 @@ "result = qa({\"question\": query, \"chat_history\": chat_history, \"vectordbkwargs\": vectordbkwargs})" ] }, + { + "cell_type": "code", + "execution_count": 35, + "id": "24ebdaec", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " The president said that Ketanji Brown Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\n" + ] + } + ], + "source": [ + "print(result['answer'])" + ] + }, { "cell_type": "markdown", "id": "99b96dae", @@ -459,7 +467,7 @@ { "data": { "text/plain": [ - "' The president did not mention Ketanji Brown Jackson.'" + "\" The president said that he nominated Circuit Court of Appeals Judge Ketanji Brown Jackson, who he described as one of the nation's top legal minds, to continue Justice Breyer's legacy of excellence.\"" ] }, "execution_count": 23, @@ -538,7 +546,7 @@ { "data": { "text/plain": [ - "' The president did not mention Ketanji Brown Jackson.\\nSOURCES: ../../modules/state_of_the_union.txt'" + "\" The president said that he nominated Circuit Court of Appeals Judge Ketanji Brown Jackson, who he described as one of the nation's top legal minds, and that she will continue Justice Breyer's legacy of excellence.\\nSOURCES: ../../../state_of_the_union.txt\"" ] }, "execution_count": 27, @@ -598,7 +606,7 @@ "name": "stdout", "output_type": "stream", "text": [ - " The president said that Ketanji Brown Jackson is one of the nation's top legal minds, a former top litigator in private practice, and a former federal public defender." 
+ " The president said that Ketanji Brown Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence." ] } ], @@ -620,7 +628,7 @@ "name": "stdout", "output_type": "stream", "text": [ - " Justice Stephen Breyer." + " Justice Stephen Breyer" ] } ], @@ -681,7 +689,7 @@ { "data": { "text/plain": [ - "\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds, a former top litigator in private practice, and a former federal public defender.\"" + "\" The president said that Ketanji Brown Jackson is one of the nation's top legal minds and that she will continue Justice Breyer's legacy of excellence.\"" ] }, "execution_count": 33, diff --git a/docs/integrations/vectara/vectara_text_generation.ipynb b/docs/integrations/vectara/vectara_text_generation.ipynb index bd70d1d5a2a55..438bad758179a 100644 --- a/docs/integrations/vectara/vectara_text_generation.ipynb +++ b/docs/integrations/vectara/vectara_text_generation.ipynb @@ -6,7 +6,7 @@ "source": [ "# Vectara Text Generation\n", "\n", - "This notebook is based on [chat_vector_db](https://github.com/hwchase17/langchain/blob/master/docs/modules/chains/index_examples/question_answering.ipynb) and adapted to Vectara." + "This notebook is based on [text generation](https://github.com/hwchase17/langchain/blob/master/docs/modules/chains/index_examples/vector_db_text_generation.ipynb) notebook and adapted to Vectara." ] }, { @@ -24,6 +24,7 @@ "metadata": {}, "outputs": [], "source": [ + "import os\n", "from langchain.llms import OpenAI\n", "from langchain.docstore.document import Document\n", "import requests\n", @@ -159,7 +160,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "[{'text': '\\n\\nEnvironment variables are an essential part of any development workflow. They provide a way to store and access information that is specific to the environment in which the code is running. This can be especially useful when working with different versions of a language or framework, or when running code on different machines.\\n\\nThe Deno CLI tasks extension provides a way to easily manage environment variables when running Deno commands. This extension provides a task definition for allowing you to create tasks that execute the `deno` CLI from within the editor. The template for the Deno CLI tasks has the following interface, which can be configured in a `tasks.json` within your workspace:\\n\\nThe task definition includes the `type` field, which should be set to `deno`, and the `command` field, which is the `deno` command to run (e.g. `run`, `test`, `cache`, etc.). Additionally, you can specify additional arguments to pass on the command line, the current working directory to execute the command, and any environment variables.\\n\\nUsing environment variables with the Deno CLI tasks extension is a great way to ensure that your code is running in the correct environment. For example, if you are running a test suite,'}, {'text': '\\n\\nEnvironment variables are an important part of any programming language, and they can be used to store and access data in a variety of ways. In this blog post, we\\'ll be taking a look at environment variables specifically for the shell.\\n\\nShell variables are similar to environment variables, but they won\\'t be exported to spawned commands. They are defined with the following syntax:\\n\\n```sh\\nVAR_NAME=value\\n```\\n\\nShell variables can be used to store and access data in a variety of ways. 
For example, you can use them to store values that you want to re-use, but don\\'t want to be available in any spawned processes.\\n\\nFor example, if you wanted to store a value and then use it in a command, you could do something like this:\\n\\n```sh\\nVAR=hello && echo $VAR && deno eval \"console.log(\\'Deno: \\' + Deno.env.get(\\'VAR\\'))\"\\n```\\n\\nThis would output the following:\\n\\n```\\nhello\\nDeno: undefined\\n```\\n\\nAs you can see, the value stored in the shell variable is not available in the spawned process.\\n\\n'}, {'text': '\\n\\nWhen it comes to developing applications, environment variables are an essential part of the process. Environment variables are used to store information that can be used by applications and scripts to customize their behavior. This is especially important when it comes to developing applications with Deno, as there are several environment variables that can impact the behavior of Deno.\\n\\nThe most important environment variable for Deno is `DENO_AUTH_TOKENS`. This environment variable is used to store authentication tokens that are used to access remote resources. This is especially important when it comes to accessing remote APIs or databases. Without the proper authentication tokens, Deno will not be able to access the remote resources.\\n\\nAnother important environment variable for Deno is `DENO_DIR`. This environment variable is used to store the directory where Deno will store its files. This includes the Deno executable, the Deno cache, and the Deno configuration files. By setting this environment variable, you can ensure that Deno will always be able to find the files it needs.\\n\\nFinally, there is the `DENO_PLUGINS` environment variable. This environment variable is used to store the list of plugins that Deno will use. This is important for customizing the'}, {'text': '\\n\\nEnvironment variables are a great way to store and access sensitive information in your Deno applications. Deno offers built-in support for environment variables with `Deno.env`, and you can also use a `.env` file to store and access environment variables. In this blog post, we\\'ll explore both of these options and how to use them in your Deno applications.\\n\\n## Built-in `Deno.env`\\n\\nThe Deno runtime offers built-in support for environment variables with [`Deno.env`](https://deno.land/api@v1.25.3?s=Deno.env). `Deno.env` has getter and setter methods. Here is example usage:\\n\\n```ts\\nDeno.env.set(\"FIREBASE_API_KEY\", \"examplekey123\");\\nDeno.env.set(\"FIREBASE_AUTH_DOMAIN\", \"firebasedomain.com\");\\n\\nconsole.log(Deno.env.get(\"FIREBASE_API_KEY\")); // examplekey123\\nconsole.log(Deno.env.get(\"FIREBASE_AUTH_'}]\n" + "[{'text': '\\n\\nEnvironment variables are a powerful tool for managing configuration settings in your applications. They allow you to store and access values from anywhere in your code, making it easier to keep your codebase organized and maintainable.\\n\\nHowever, there are times when you may want to use environment variables specifically for a single command. This is where shell variables come in. Shell variables are similar to environment variables, but they won\\'t be exported to spawned commands. 
They are defined with the following syntax:\\n\\n```sh\\nVAR_NAME=value\\n```\\n\\nFor example, if you wanted to use a shell variable instead of an environment variable in a command, you could do something like this:\\n\\n```sh\\nVAR=hello && echo $VAR && deno eval \"console.log(\\'Deno: \\' + Deno.env.get(\\'VAR\\'))\"\\n```\\n\\nThis would output the following:\\n\\n```\\nhello\\nDeno: undefined\\n```\\n\\nShell variables can be useful when you want to re-use a value, but don\\'t want it available in any spawned processes.\\n\\nAnother way to use environment variables is through pipelines. Pipelines provide a way to pipe the'}, {'text': '\\n\\nEnvironment variables are a great way to store and access sensitive information in your applications. They are also useful for configuring applications and managing different environments. In Deno, there are two ways to use environment variables: the built-in `Deno.env` and the `.env` file.\\n\\nThe `Deno.env` is a built-in feature of the Deno runtime that allows you to set and get environment variables. It has getter and setter methods that you can use to access and set environment variables. For example, you can set the `FIREBASE_API_KEY` and `FIREBASE_AUTH_DOMAIN` environment variables like this:\\n\\n```ts\\nDeno.env.set(\"FIREBASE_API_KEY\", \"examplekey123\");\\nDeno.env.set(\"FIREBASE_AUTH_DOMAIN\", \"firebasedomain.com\");\\n\\nconsole.log(Deno.env.get(\"FIREBASE_API_KEY\")); // examplekey123\\nconsole.log(Deno.env.get(\"FIREBASE_AUTH_DOMAIN\")); // firebasedomain'}, {'text': \"\\n\\nEnvironment variables are a powerful tool for managing configuration and settings in your applications. They allow you to store and access values that can be used in your code, and they can be set and changed without having to modify your code.\\n\\nIn Deno, environment variables are defined using the `export` command. For example, to set a variable called `VAR_NAME` to the value `value`, you would use the following command:\\n\\n```sh\\nexport VAR_NAME=value\\n```\\n\\nYou can then access the value of the environment variable in your code using the `Deno.env.get()` method. For example, if you wanted to log the value of the `VAR_NAME` variable, you could use the following code:\\n\\n```js\\nconsole.log(Deno.env.get('VAR_NAME'));\\n```\\n\\nYou can also set environment variables for a single command. To do this, you can list the environment variables before the command, like so:\\n\\n```\\nVAR=hello VAR2=bye deno run main.ts\\n```\\n\\nThis will set the environment variables `VAR` and `V\"}, {'text': \"\\n\\nEnvironment variables are a powerful tool for managing settings and configuration in your applications. They can be used to store information such as user preferences, application settings, and even passwords. In this blog post, we'll discuss how to make Deno scripts executable with a hashbang (shebang).\\n\\nA hashbang is a line of code that is placed at the beginning of a script. It tells the system which interpreter to use when running the script. In the case of Deno, the hashbang should be `#!/usr/bin/env -S deno run --allow-env`. This tells the system to use the Deno interpreter and to allow the script to access environment variables.\\n\\nOnce the hashbang is in place, you may need to give the script execution permissions. On Linux, this can be done with the command `sudo chmod +x hashbang.ts`. 
After that, you can execute the script by calling it like any other command: `./hashbang.ts`.\\n\\nIn the example program, we give the context permission to access the environment variables and print the Deno installation path. This is done by using the `Deno.env.get()` function, which returns the value of the specified environment\"}]\n" ] } ], diff --git a/docs/modules/indexes/vectorstores/examples/vectara.ipynb b/docs/modules/indexes/vectorstores/examples/vectara.ipynb index 6551f55cbc049..034a7daeb27b1 100644 --- a/docs/modules/indexes/vectorstores/examples/vectara.ipynb +++ b/docs/modules/indexes/vectorstores/examples/vectara.ipynb @@ -101,7 +101,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 4, "id": "8429667e", "metadata": { "ExecuteTime": { @@ -133,7 +133,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 5, "id": "a8c513ab", "metadata": { "ExecuteTime": { @@ -145,12 +145,12 @@ "outputs": [], "source": [ "query = \"What did the president say about Ketanji Brown Jackson\"\n", - "found_docs = vectara.similarity_search(query)" + "found_docs = vectara.similarity_search(query, n_sentence_context=0)" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 6, "id": "fc516993", "metadata": { "ExecuteTime": { @@ -164,7 +164,13 @@ "name": "stdout", "output_type": "stream", "text": [ - "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. A former top litigator in private practice. A former federal public defender.\n" + "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", + "\n", + "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", + "\n", + "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n" ] } ], @@ -185,7 +191,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "id": "8804a21d", "metadata": { "ExecuteTime": { @@ -201,7 +207,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 8, "id": "756a6887", "metadata": { "ExecuteTime": { @@ -214,9 +220,15 @@ "name": "stdout", "output_type": "stream", "text": [ - "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. 
One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. A former top litigator in private practice. A former federal public defender.\n", + "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", "\n", - "Score: 1.0046461\n" + "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", + "\n", + "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n", + "\n", + "Score: 0.7129974\n" ] } ], @@ -239,7 +251,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 9, "id": "9427195f", "metadata": { "ExecuteTime": { @@ -251,10 +263,10 @@ { "data": { "text/plain": [ - "VectorStoreRetriever(vectorstore=, search_type='similarity', search_kwargs={})" + "VectaraRetriever(vectorstore=, search_type='similarity', search_kwargs={'lambda_val': 0.025, 'k': 5, 'filter': '', 'n_sentence_context': '0'})" ] }, - "execution_count": 11, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -266,7 +278,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 10, "id": "f3c70c31", "metadata": { "ExecuteTime": { @@ -278,10 +290,10 @@ { "data": { "text/plain": [ - "Document(page_content='Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence. A former top litigator in private practice. A former federal public defender.', metadata={'source': '../../modules/state_of_the_union.txt'})" + "Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. 
One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../../state_of_the_union.txt'})" ] }, - "execution_count": 15, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -316,7 +328,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.3" + "version": "3.10.9" } }, "nbformat": 4, diff --git a/docs/modules/indexes/vectorstores/examples/weaviate.ipynb b/docs/modules/indexes/vectorstores/examples/weaviate.ipynb index 2b151716be154..b73957ed1e166 100644 --- a/docs/modules/indexes/vectorstores/examples/weaviate.ipynb +++ b/docs/modules/indexes/vectorstores/examples/weaviate.ipynb @@ -209,7 +209,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "8fc3487b", "metadata": {}, @@ -218,7 +217,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "281c0fcc", "metadata": {}, @@ -236,7 +234,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "503e2e75", "metadata": {}, @@ -273,7 +270,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "fbd7a6cb", "metadata": {}, @@ -282,7 +278,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "f349acb9", "metadata": {}, @@ -384,7 +379,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.16" + "version": "3.10.9" } }, "nbformat": 4, diff --git a/langchain/vectorstores/vectara.py b/langchain/vectorstores/vectara.py index acd7e63d4bbd8..ed6551a89ca91 100644 --- a/langchain/vectorstores/vectara.py +++ b/langchain/vectorstores/vectara.py @@ -55,6 +55,8 @@ def __init__( else: logging.debug(f"Using corpus id {self._vectara_corpus_id}") self._session = requests.Session() # to reuse connections + adapter = requests.adapters.HTTPAdapter(max_retries=3) + self._session.mount("http://", adapter) def _get_post_headers(self) -> dict: """Returns headers that should be attached to each post request.""" @@ -95,19 +97,15 @@ def _delete_doc(self, doc_id: str) -> bool: return False return True - def _index_doc(self, doc_id: str, text: str, metadata: dict) -> bool: + def _index_doc(self, doc: dict) -> bool: request: dict[str, Any] = {} request["customer_id"] = self._vectara_customer_id request["corpus_id"] = self._vectara_corpus_id - request["document"] = { - "document_id": doc_id, - "metadataJson": json.dumps(metadata), - "section": [{"text": text, "metadataJson": json.dumps(metadata)}], - } + request["document"] = doc response = self._session.post( headers=self._get_post_headers(), - url="https://api.vectara.io/v1/index", + url="https://api.vectara.io/v1/core/index", data=json.dumps(request), timeout=30, verify=True, @@ -138,22 +136,33 @@ def add_texts( List of ids from adding the texts into the vectorstore. 
""" - ids = [md5(text.encode("utf-8")).hexdigest() for text in texts] - for i, doc in enumerate(texts): - doc_id = ids[i] - metadata = metadatas[i] if metadatas else {} - succeeded = self._index_doc(doc_id, doc, metadata) - if not succeeded: - self._delete_doc(doc_id) - self._index_doc(doc_id, doc, metadata) - return ids + doc_hash = md5() + for t in texts: + doc_hash.update(t.encode()) + doc_id = doc_hash.hexdigest() + if metadatas is None: + metadatas = [{} for _ in texts] + doc = { + "document_id": doc_id, + "metadataJson": json.dumps({"source": "langchain"}), + "parts": [ + {"text": text, "metadataJson": json.dumps(md)} + for text, md in zip(texts, metadatas) + ], + } + succeeded = self._index_doc(doc) + if not succeeded: + self._delete_doc(doc_id) + self._index_doc(doc) + return [doc_id] def similarity_search_with_score( self, query: str, k: int = 5, - alpha: float = 0.025, + lambda_val: float = 0.025, filter: Optional[str] = None, + n_sentence_context: int = 0, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return Vectara documents most similar to query, along with scores. @@ -161,42 +170,45 @@ def similarity_search_with_score( Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 5. - alpha: parameter for hybrid search (called "lambda" in Vectara - documentation). + lambda_val: lexical match parameter for hybrid search. filter: Dictionary of argument(s) to filter on metadata. For example a filter can be "doc.rating > 3.0 and part.lang = 'deu'"} see https://docs.vectara.com/docs/search-apis/sql/filter-overview for more details. + n_sentence_context: number of sentences before/after the matching segment + to add Returns: List of Documents most similar to the query and score for each. """ + data = json.dumps( + { + "query": [ + { + "query": query, + "start": 0, + "num_results": k, + "context_config": { + "sentences_before": n_sentence_context, + "sentences_after": n_sentence_context, + }, + "corpus_key": [ + { + "customer_id": self._vectara_customer_id, + "corpus_id": self._vectara_corpus_id, + "metadataFilter": filter, + "lexical_interpolation_config": {"lambda": lambda_val}, + } + ], + } + ] + } + ) + response = self._session.post( headers=self._get_post_headers(), url="https://api.vectara.io/v1/query", - data=json.dumps( - { - "query": [ - { - "query": query, - "start": 0, - "num_results": k, - "context_config": { - "sentences_before": 3, - "sentences_after": 3, - }, - "corpus_key": [ - { - "customer_id": self._vectara_customer_id, - "corpus_id": self._vectara_corpus_id, - "metadataFilter": filter, - "lexical_interpolation_config": {"lambda": alpha}, - } - ], - } - ] - } - ), + data=data, timeout=10, ) @@ -231,8 +243,9 @@ def similarity_search( self, query: str, k: int = 5, - alpha: float = 0.025, + lambda_val: float = 0.025, filter: Optional[str] = None, + n_sentence_context: int = 0, **kwargs: Any, ) -> List[Document]: """Return Vectara documents most similar to query, along with scores. @@ -244,12 +257,19 @@ def similarity_search( filter can be "doc.rating > 3.0 and part.lang = 'deu'"} see https://docs.vectara.com/docs/search-apis/sql/filter-overview for more details. 
+ n_sentence_context: number of sentences before/after the matching segment + to add Returns: List of Documents most similar to the query """ docs_and_scores = self.similarity_search_with_score( - query, k=k, alpha=alpha, filter=filter, **kwargs + query, + k=k, + lambda_val=lambda_val, + filter=filter, + n_sentence_context=n_sentence_context, + **kwargs, ) return [doc for doc, _ in docs_and_scores] @@ -286,15 +306,22 @@ def as_retriever(self, **kwargs: Any) -> VectaraRetriever: class VectaraRetriever(VectorStoreRetriever): vectorstore: Vectara - search_kwargs: dict = Field(default_factory=lambda: {"alpha": 0.025, "k": 5}) + search_kwargs: dict = Field( + default_factory=lambda: { + "lambda_val": 0.025, + "k": 5, + "filter": "", + "n_sentence_context": "0", + } + ) """Search params. k: Number of Documents to return. Defaults to 5. - alpha: parameter for hybrid search (called "lambda" in Vectara - documentation). + lambda_val: lexical match parameter for hybrid search. filter: Dictionary of argument(s) to filter on metadata. For example a filter can be "doc.rating > 3.0 and part.lang = 'deu'"} see https://docs.vectara.com/docs/search-apis/sql/filter-overview for more details. + n_sentence_context: number of sentences before/after the matching segment to add """ def add_texts( diff --git a/tests/integration_tests/vectorstores/test_vectara.py b/tests/integration_tests/vectorstores/test_vectara.py index aaa5eaa56cde7..2a08194d20e7a 100644 --- a/tests/integration_tests/vectorstores/test_vectara.py +++ b/tests/integration_tests/vectorstores/test_vectara.py @@ -27,7 +27,9 @@ def test_vectara_add_documents() -> None: ) # finally do a similarity search to see if all works okay - output = docsearch.similarity_search("large language model", k=2) + output = docsearch.similarity_search( + "large language model", k=2, n_sentence_context=0 + ) assert output[0].page_content == "large language model" assert output[0].metadata == {"abbr": "llm"} assert output[1].page_content == "information retrieval" From 1250cd4630218dd7983e9c544cd1965606a6712f Mon Sep 17 00:00:00 2001 From: Jens Madsen Date: Sun, 11 Jun 2023 01:36:03 +0200 Subject: [PATCH 16/46] fix: use model token limit, not tokenizer limit (#5939) This fixes a token limit bug in the SentenceTransformersTokenTextSplitter. Previously, the token limit was taken from the tokenizer used by the model. However, for some models the token limit of the tokenizer (from `AutoTokenizer.from_pretrained`) does not equal the token limit of the model, so this was a false assumption. The token limit of the text splitter is therefore now taken from the sentence transformers model itself. Twitter: @plasmajens #### Who can review? @hwchase17 and/or @dev2049 --------- Co-authored-by: Harrison Chase --- langchain/text_splitter.py | 38 ++++++++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/langchain/text_splitter.py b/langchain/text_splitter.py index 23332f291ae93..7c875682b9eef 100644 --- a/langchain/text_splitter.py +++ b/langchain/text_splitter.py @@ -20,6 +20,7 @@ Type, TypeVar, Union, + cast, ) from langchain.docstore.document import Document @@ -59,7 +60,7 @@ def __init__( length_function: Callable[[str], int] = len, keep_separator: bool = False, add_start_index: bool = False, - ): + ) -> None: """Create a new TextSplitter. 
Args: @@ -240,7 +241,7 @@ async def atransform_documents( class CharacterTextSplitter(TextSplitter): """Implementation of splitting text that looks at characters.""" - def __init__(self, separator: str = "\n\n", **kwargs: Any): + def __init__(self, separator: str = "\n\n", **kwargs: Any) -> None: """Create a new TextSplitter.""" super().__init__(**kwargs) self._separator = separator @@ -265,7 +266,7 @@ class Tokenizer: def split_text_on_tokens(*, text: str, tokenizer: Tokenizer) -> List[str]: """Split incoming text and return chunks.""" - splits = [] + splits: List[str] = [] input_ids = tokenizer.encode(text) start_idx = 0 cur_idx = min(start_idx + tokenizer.tokens_per_chunk, len(input_ids)) @@ -288,7 +289,7 @@ def __init__( allowed_special: Union[Literal["all"], AbstractSet[str]] = set(), disallowed_special: Union[Literal["all"], Collection[str]] = "all", **kwargs: Any, - ): + ) -> None: """Create a new TextSplitter.""" super().__init__(**kwargs) try: @@ -335,19 +336,28 @@ def __init__( model_name: str = "sentence-transformers/all-mpnet-base-v2", tokens_per_chunk: Optional[int] = None, **kwargs: Any, - ): + ) -> None: """Create a new TextSplitter.""" super().__init__(**kwargs, chunk_overlap=chunk_overlap) - from transformers import AutoTokenizer + + try: + from sentence_transformers import SentenceTransformer + except ImportError: + raise ImportError( + "Could not import sentence_transformers python package. " + "This is needed in order to use SentenceTransformersTokenTextSplitter. " + "Please install it with `pip install sentence-transformers`." + ) self.model_name = model_name - self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) + self._model = SentenceTransformer(self.model_name) + self.tokenizer = self._model.tokenizer self._initialize_chunk_configuration(tokens_per_chunk=tokens_per_chunk) def _initialize_chunk_configuration( self, *, tokens_per_chunk: Optional[int] ) -> None: - self.maximum_tokens_per_chunk = self.tokenizer.max_len_single_sentence + self.maximum_tokens_per_chunk = cast(int, self._model.max_seq_length) if tokens_per_chunk is None: self.tokens_per_chunk = self.maximum_tokens_per_chunk @@ -419,7 +429,7 @@ def __init__( separators: Optional[List[str]] = None, keep_separator: bool = True, **kwargs: Any, - ): + ) -> None: """Create a new TextSplitter.""" super().__init__(keep_separator=keep_separator, **kwargs) self._separators = separators or ["\n\n", "\n", " ", ""] @@ -785,7 +795,7 @@ def get_separators_for_language(language: Language) -> List[str]: class NLTKTextSplitter(TextSplitter): """Implementation of splitting text that looks at sentences using NLTK.""" - def __init__(self, separator: str = "\n\n", **kwargs: Any): + def __init__(self, separator: str = "\n\n", **kwargs: Any) -> None: """Initialize the NLTK splitter.""" super().__init__(**kwargs) try: @@ -810,7 +820,7 @@ class SpacyTextSplitter(TextSplitter): def __init__( self, separator: str = "\n\n", pipeline: str = "en_core_web_sm", **kwargs: Any - ): + ) -> None: """Initialize the spacy text splitter.""" super().__init__(**kwargs) try: @@ -832,7 +842,7 @@ def split_text(self, text: str) -> List[str]: class PythonCodeTextSplitter(RecursiveCharacterTextSplitter): """Attempts to split the text along Python syntax.""" - def __init__(self, **kwargs: Any): + def __init__(self, **kwargs: Any) -> None: """Initialize a PythonCodeTextSplitter.""" separators = self.get_separators_for_language(Language.PYTHON) super().__init__(separators=separators, **kwargs) @@ -841,7 +851,7 @@ def __init__(self, **kwargs: 
Any): class MarkdownTextSplitter(RecursiveCharacterTextSplitter): """Attempts to split the text along Markdown-formatted headings.""" - def __init__(self, **kwargs: Any): + def __init__(self, **kwargs: Any) -> None: """Initialize a MarkdownTextSplitter.""" separators = self.get_separators_for_language(Language.MARKDOWN) super().__init__(separators=separators, **kwargs) @@ -850,7 +860,7 @@ def __init__(self, **kwargs: Any): class LatexTextSplitter(RecursiveCharacterTextSplitter): """Attempts to split the text along Latex-formatted layout elements.""" - def __init__(self, **kwargs: Any): + def __init__(self, **kwargs: Any) -> None: """Initialize a LatexTextSplitter.""" separators = self.get_separators_for_language(Language.LATEX) super().__init__(separators=separators, **kwargs) From d2270a2261b1f95993788c3fe53ce4e2a0593a8b Mon Sep 17 00:00:00 2001 From: Satheesh Valluru Date: Sun, 11 Jun 2023 05:13:36 +0530 Subject: [PATCH 17/46] Fix: Grammar fix in documentation (#5925) Fix for grammatical errors in the documentation of `vectorstore`. @vowelparrot --- .../agent_executors/examples/agent_vectorstore.ipynb | 7 ++++++- .../agents/toolkits/examples/vectorstore.ipynb | 12 +++++++++--- 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/docs/modules/agents/agent_executors/examples/agent_vectorstore.ipynb b/docs/modules/agents/agent_executors/examples/agent_vectorstore.ipynb index 04635069c91b8..56a965a555a92 100644 --- a/docs/modules/agents/agent_executors/examples/agent_vectorstore.ipynb +++ b/docs/modules/agents/agent_executors/examples/agent_vectorstore.ipynb @@ -14,6 +14,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "9b22020a", "metadata": {}, @@ -139,6 +140,7 @@ "source": [] }, { + "attachments": {}, "cell_type": "markdown", "id": "c0a6c031", "metadata": {}, @@ -229,7 +231,7 @@ } ], "source": [ - "agent.run(\"What did biden say about ketanji brown jackson is the state of the union address?\")" + "agent.run(\"What did biden say about ketanji brown jackson in the state of the union address?\")" ] }, { @@ -271,6 +273,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "787a9b5e", "metadata": {}, @@ -279,6 +282,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "9161ba91", "metadata": {}, @@ -396,6 +400,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "49a0cbbe", "metadata": {}, diff --git a/docs/modules/agents/toolkits/examples/vectorstore.ipynb b/docs/modules/agents/toolkits/examples/vectorstore.ipynb index fdc5c656c2217..8d47edc800e45 100644 --- a/docs/modules/agents/toolkits/examples/vectorstore.ipynb +++ b/docs/modules/agents/toolkits/examples/vectorstore.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "id": "18ada398-dce6-4049-9b56-fc0ede63da9c", "metadata": {}, @@ -11,6 +12,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "eecb683b-3a46-4b9d-81a3-7caefbfec1a1", "metadata": {}, @@ -88,6 +90,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "f4814175-964d-42f1-aa9d-22801ce1e912", "metadata": {}, @@ -123,6 +126,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "8a38ad10", "metadata": {}, @@ -165,7 +169,7 @@ } ], "source": [ - "agent_executor.run(\"What did biden say about ketanji brown jackson is the state of the union address?\")" + "agent_executor.run(\"What did biden say about ketanji brown jackson in the state of the union address?\")" ] }, { @@ -203,10 +207,11 @@ } ], "source": [ - "agent_executor.run(\"What did biden say about ketanji brown 
jackson is the state of the union address? List the source.\")" + "agent_executor.run(\"What did biden say about ketanji brown jackson in the state of the union address? List the source.\")" ] }, { + "attachments": {}, "cell_type": "markdown", "id": "7ca07707", "metadata": {}, @@ -255,6 +260,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "71680984-edaf-4a63-90f5-94edbd263550", "metadata": {}, @@ -299,7 +305,7 @@ } ], "source": [ - "agent_executor.run(\"What did biden say about ketanji brown jackson is the state of the union address?\")" + "agent_executor.run(\"What did biden say about ketanji brown jackson in the state of the union address?\")" ] }, { From ac3e6e394406ff95dcd47c603db62a2ccbd00b81 Mon Sep 17 00:00:00 2001 From: Thomas B Date: Sun, 11 Jun 2023 01:48:53 +0200 Subject: [PATCH 18/46] Fix IndexError in RecursiveCharacterTextSplitter (#5902) Fixes a previously unreported error that may occur in some cases in the RecursiveCharacterTextSplitter. An empty `new_separators` array ([]) would end up in the else path of the condition below and be used in a function where it is expected to be non-empty. ```python if new_separators is None: ... else: # _split_text() expects this array to be non-empty! other_info = self._split_text(s, new_separators) ``` resulting in an `IndexError`: ```python def _split_text(self, text: str, separators: List[str]) -> List[str]: """Split incoming text and return chunks.""" final_chunks = [] # Get appropriate separator to use > separator = separators[-1] E IndexError: list index out of range langchain/text_splitter.py:425: IndexError ``` #### Who can review? @hwchase17 @eyurtsev --------- Co-authored-by: Harrison Chase --- langchain/text_splitter.py | 4 +-- tests/unit_tests/test_text_splitter.py | 44 ++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 2 deletions(-) diff --git a/langchain/text_splitter.py b/langchain/text_splitter.py index 7c875682b9eef..89559505583c7 100644 --- a/langchain/text_splitter.py +++ b/langchain/text_splitter.py @@ -439,7 +439,7 @@ def _split_text(self, text: str, separators: List[str]) -> List[str]: final_chunks = [] # Get appropriate separator to use separator = separators[-1] - new_separators = None + new_separators = [] for i, _s in enumerate(separators): if _s == "": separator = _s @@ -461,7 +461,7 @@ def _split_text(self, text: str, separators: List[str]) -> List[str]: merged_text = self._merge_splits(_good_splits, _separator) final_chunks.extend(merged_text) _good_splits = [] - if new_separators is None: + if not new_separators: final_chunks.append(s) else: other_info = self._split_text(s, new_separators) diff --git a/tests/unit_tests/test_text_splitter.py b/tests/unit_tests/test_text_splitter.py index 2da634cd38b10..91730b0329b90 100644 --- a/tests/unit_tests/test_text_splitter.py +++ b/tests/unit_tests/test_text_splitter.py @@ -1,4 +1,6 @@ """Test text splitting functionality.""" +from typing import List + import pytest from langchain.docstore.document import Document @@ -148,6 +150,48 @@ def test_metadata_not_shallow() -> None: assert docs[1].metadata == {"source": "1"} +def test_iterative_text_splitter_keep_separator() -> None: + chunk_size = 5 + output = __test_iterative_text_splitter(chunk_size=chunk_size, keep_separator=True) + + assert output == [ + "....5", + "X..3", + "Y...4", + "X....5", + "Y...", + ] + + +def test_iterative_text_splitter_discard_separator() -> None: + chunk_size = 5 + output = __test_iterative_text_splitter(chunk_size=chunk_size, keep_separator=False) + + assert output == [ 
"....5", + "..3", + "...4", + "....5", + "...", + ] + + +def __test_iterative_text_splitter(chunk_size: int, keep_separator: bool) -> List[str]: + chunk_size += 1 if keep_separator else 0 + + splitter = RecursiveCharacterTextSplitter( + chunk_size=chunk_size, + chunk_overlap=0, + separators=["X", "Y"], + keep_separator=keep_separator, + ) + text = "....5X..3Y...4X....5Y..." + output = splitter.split_text(text) + for chunk in output: + assert len(chunk) <= chunk_size, f"Chunk is larger than {chunk_size}" + return output + + def test_iterative_text_splitter() -> None: """Test iterative text splitter.""" text = """Hi.\n\nI'm Harrison.\n\nHow? Are? You?\nOkay then f f f f. From 0b740c9baa198d1fab000c9468b05540fcea9df0 Mon Sep 17 00:00:00 2001 From: Vincent Date: Sun, 11 Jun 2023 07:51:04 +0800 Subject: [PATCH 19/46] add ocr_languages param for ConfluenceLoader.load() (#5823) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit @eyurtsev 当Confluence文档内容中包含附件,且附件内容为非英文时,提取出来的文本是乱码的。 When the content of the document contains attachments, and the content of the attachments is not in English, the extracted text is garbled. 这主要是因为没有为pytesseract传递lang参数,默认情况下只支持英文。 This is mainly because lang parameter is not passed to pytesseract, and only English is supported by default. 所以我给ConfluenceLoader.load()添加了ocr_languages参数,以便支持多种语言。 So I added the ocr_languages parameter to ConfluenceLoader.load () to support multiple languages. --- langchain/document_loaders/confluence.py | 65 ++++++++++++++++++------ 1 file changed, 50 insertions(+), 15 deletions(-) diff --git a/langchain/document_loaders/confluence.py b/langchain/document_loaders/confluence.py index de08332bd9adc..05806412cb18d 100644 --- a/langchain/document_loaders/confluence.py +++ b/langchain/document_loaders/confluence.py @@ -180,6 +180,7 @@ def load( include_comments: bool = False, limit: Optional[int] = 50, max_pages: Optional[int] = 1000, + ocr_languages: Optional[str] = None, ) -> List[Document]: """ :param space_key: Space key retrieved from a confluence URL, defaults to None @@ -203,6 +204,10 @@ def load( :type limit: int, optional :param max_pages: Maximum number of pages to retrieve in total, defaults 1000 :type max_pages: int, optional + :param ocr_languages: The languages to use for the Tesseract agent. To use a + language, you'll first need to install the appropriate + Tesseract language pack. 
+ :type ocr_languages: str, optional :raises ValueError: _description_ :raises ImportError: _description_ :return: _description_ @@ -226,7 +231,11 @@ def load( expand="body.storage.value", ) docs += self.process_pages( - pages, include_restricted_content, include_attachments, include_comments + pages, + include_restricted_content, + include_attachments, + include_comments, + ocr_languages, ) if label: @@ -252,7 +261,11 @@ def load( expand="body.storage.value", ) docs += self.process_pages( - pages, include_restricted_content, include_attachments, include_comments + pages, + include_restricted_content, + include_attachments, + include_comments, + ocr_languages, ) if page_ids: @@ -272,7 +285,9 @@ def load( page = get_page(page_id=page_id, expand="body.storage.value") if not include_restricted_content and not self.is_public_page(page): continue - doc = self.process_page(page, include_attachments, include_comments) + doc = self.process_page( + page, include_attachments, include_comments, ocr_languages + ) docs.append(doc) return docs @@ -335,13 +350,16 @@ def process_pages( include_restricted_content: bool, include_attachments: bool, include_comments: bool, + ocr_languages: Optional[str] = None, ) -> List[Document]: """Process a list of pages into a list of documents.""" docs = [] for page in pages: if not include_restricted_content and not self.is_public_page(page): continue - doc = self.process_page(page, include_attachments, include_comments) + doc = self.process_page( + page, include_attachments, include_comments, ocr_languages + ) docs.append(doc) return docs @@ -351,6 +369,7 @@ def process_page( page: dict, include_attachments: bool, include_comments: bool, + ocr_languages: Optional[str] = None, ) -> Document: try: from bs4 import BeautifulSoup # type: ignore @@ -361,7 +380,7 @@ def process_page( ) if include_attachments: - attachment_texts = self.process_attachment(page["id"]) + attachment_texts = self.process_attachment(page["id"], ocr_languages) else: attachment_texts = [] text = BeautifulSoup(page["body"]["storage"]["value"], "lxml").get_text( @@ -388,7 +407,11 @@ def process_page( }, ) - def process_attachment(self, page_id: str) -> List[str]: + def process_attachment( + self, + page_id: str, + ocr_languages: Optional[str] = None, + ) -> List[str]: try: from PIL import Image # noqa: F401 except ImportError: @@ -405,13 +428,13 @@ def process_attachment(self, page_id: str) -> List[str]: absolute_url = self.base_url + attachment["_links"]["download"] title = attachment["title"] if media_type == "application/pdf": - text = title + self.process_pdf(absolute_url) + text = title + self.process_pdf(absolute_url, ocr_languages) elif ( media_type == "image/png" or media_type == "image/jpg" or media_type == "image/jpeg" ): - text = title + self.process_image(absolute_url) + text = title + self.process_image(absolute_url, ocr_languages) elif ( media_type == "application/vnd.openxmlformats-officedocument" ".wordprocessingml.document" @@ -420,14 +443,18 @@ def process_attachment(self, page_id: str) -> List[str]: elif media_type == "application/vnd.ms-excel": text = title + self.process_xls(absolute_url) elif media_type == "image/svg+xml": - text = title + self.process_svg(absolute_url) + text = title + self.process_svg(absolute_url, ocr_languages) else: continue texts.append(text) return texts - def process_pdf(self, link: str) -> str: + def process_pdf( + self, + link: str, + ocr_languages: Optional[str] = None, + ) -> str: try: import pytesseract # noqa: F401 from pdf2image import 
convert_from_bytes # noqa: F401 @@ -452,12 +479,16 @@ def process_pdf(self, link: str) -> str: return text for i, image in enumerate(images): - image_text = pytesseract.image_to_string(image) + image_text = pytesseract.image_to_string(image, lang=ocr_languages) text += f"Page {i + 1}:\n{image_text}\n\n" return text - def process_image(self, link: str) -> str: + def process_image( + self, + link: str, + ocr_languages: Optional[str] = None, + ) -> str: try: import pytesseract # noqa: F401 from PIL import Image # noqa: F401 @@ -481,7 +512,7 @@ def process_image(self, link: str) -> str: except OSError: return text - return pytesseract.image_to_string(image) + return pytesseract.image_to_string(image, lang=ocr_languages) def process_doc(self, link: str) -> str: try: @@ -531,7 +562,11 @@ def process_xls(self, link: str) -> str: return text - def process_svg(self, link: str) -> str: + def process_svg( + self, + link: str, + ocr_languages: Optional[str] = None, + ) -> str: try: import pytesseract # noqa: F401 from PIL import Image # noqa: F401 @@ -560,4 +595,4 @@ def process_svg(self, link: str) -> str: img_data.seek(0) image = Image.open(img_data) - return pytesseract.image_to_string(image) + return pytesseract.image_to_string(image, lang=ocr_languages) From 2d038b57b295133ea328278fca3a4f89c155223a Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Sun, 11 Jun 2023 09:48:09 -0700 Subject: [PATCH 20/46] Harrison/arxiv fix (#5993) Co-authored-by: Juanjo do Olmo <87780148+SimplyJuanjo@users.noreply.github.com> --- langchain/tools/arxiv/tool.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/langchain/tools/arxiv/tool.py b/langchain/tools/arxiv/tool.py index 76513e27a187c..a9144dd8faf19 100644 --- a/langchain/tools/arxiv/tool.py +++ b/langchain/tools/arxiv/tool.py @@ -2,6 +2,8 @@ from typing import Optional +from pydantic import Field + from langchain.callbacks.manager import ( AsyncCallbackManagerForToolRun, CallbackManagerForToolRun, @@ -22,7 +24,7 @@ class ArxivQueryRun(BaseTool): "from scientific articles on arxiv.org. " "Input should be a search query." ) - api_wrapper: ArxivAPIWrapper + api_wrapper: ArxivAPIWrapper = Field(default_factory=ArxivAPIWrapper) def _run( self, From b934677a81d81e74aab3b3705f581aebb5450ebb Mon Sep 17 00:00:00 2001 From: Mark Pors Date: Sun, 11 Jun 2023 18:49:26 +0200 Subject: [PATCH 21/46] Obey handler.raise_error in _ahandle_event_for_handler (#6001) Obey `handler.raise_error` in `_ahandle_event_for_handler` Exceptions for async callbacks were only logged as warnings, even when `raise_error = True`.
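To make the intended behavior concrete, here is a minimal sketch (not part of the patch); `FailingHandler` is a hypothetical handler used only for this illustration:

```python
from typing import Any, Dict, List

from langchain.callbacks.base import AsyncCallbackHandler


class FailingHandler(AsyncCallbackHandler):
    """Hypothetical async handler used only to illustrate the fix."""

    # Opt in to propagating callback exceptions instead of logging them.
    raise_error = True

    async def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        raise RuntimeError("boom")


# Before this patch, the RuntimeError above was swallowed and logged as a
# warning during async runs; with the fix, it propagates to the caller.
```

#### Who can review?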
@hwchase17 @agola11 --- langchain/callbacks/manager.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/langchain/callbacks/manager.py b/langchain/callbacks/manager.py index 2069195f2dfd9..2c935003200a7 100644 --- a/langchain/callbacks/manager.py +++ b/langchain/callbacks/manager.py @@ -238,6 +238,8 @@ async def _ahandle_event_for_handler( else: logger.warning(f"Error in {event_name} callback: {e}") except Exception as e: + if handler.raise_error: + raise e logger.warning(f"Error in {event_name} callback: {e}") From 704d56e2410b04f21ef637e872a4dd8a1375b87c Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Sun, 11 Jun 2023 10:09:22 -0700 Subject: [PATCH 22/46] support kwargs (#5990) --- langchain/base_language.py | 24 ++++- langchain/chat_models/anthropic.py | 6 +- langchain/chat_models/base.py | 64 +++++++++---- langchain/chat_models/google_palm.py | 3 + langchain/chat_models/openai.py | 4 + langchain/chat_models/promptlayer_openai.py | 4 + langchain/chat_models/vertexai.py | 7 +- .../experimental/llms/jsonformer_decoder.py | 3 +- langchain/experimental/llms/rellm_decoder.py | 3 +- langchain/llms/ai21.py | 4 +- langchain/llms/aleph_alpha.py | 2 + langchain/llms/anthropic.py | 12 ++- langchain/llms/anyscale.py | 1 + langchain/llms/aviary.py | 1 + langchain/llms/bananadev.py | 2 + langchain/llms/base.py | 92 +++++++++++++------ langchain/llms/baseten.py | 1 + langchain/llms/beam.py | 2 + langchain/llms/bedrock.py | 7 +- langchain/llms/cerebriumai.py | 5 +- langchain/llms/cohere.py | 3 +- langchain/llms/ctransformers.py | 1 + langchain/llms/databricks.py | 2 + langchain/llms/deepinfra.py | 2 + langchain/llms/fake.py | 2 + langchain/llms/forefrontai.py | 3 +- langchain/llms/google_palm.py | 3 + langchain/llms/gooseai.py | 3 + langchain/llms/gpt4all.py | 4 +- langchain/llms/huggingface_endpoint.py | 4 +- langchain/llms/huggingface_hub.py | 4 +- langchain/llms/huggingface_pipeline.py | 1 + .../llms/huggingface_text_gen_inference.py | 2 + langchain/llms/human.py | 1 + langchain/llms/llamacpp.py | 2 + langchain/llms/manifest.py | 8 +- langchain/llms/modal.py | 2 + langchain/llms/mosaicml.py | 2 + langchain/llms/nlpcloud.py | 6 +- langchain/llms/openai.py | 8 ++ langchain/llms/petals.py | 2 + langchain/llms/pipelineai.py | 2 + langchain/llms/predictionguard.py | 2 + langchain/llms/promptlayer_openai.py | 18 +++- langchain/llms/replicate.py | 3 +- langchain/llms/rwkv.py | 1 + langchain/llms/sagemaker_endpoint.py | 2 + langchain/llms/self_hosted.py | 5 +- langchain/llms/self_hosted_hugging_face.py | 5 +- langchain/llms/stochasticai.py | 2 + langchain/llms/vertexai.py | 10 +- langchain/llms/writer.py | 5 +- tests/unit_tests/agents/test_agent.py | 1 + tests/unit_tests/agents/test_react.py | 1 + tests/unit_tests/chains/test_hyde.py | 4 +- tests/unit_tests/chains/test_natbot.py | 1 + tests/unit_tests/llms/fake_chat_model.py | 2 + tests/unit_tests/llms/fake_llm.py | 1 + 58 files changed, 289 insertions(+), 88 deletions(-) diff --git a/langchain/base_language.py b/langchain/base_language.py index 1b5bd0849d3ff..2587e8d245ecc 100644 --- a/langchain/base_language.py +++ b/langchain/base_language.py @@ -2,7 +2,7 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import List, Optional, Sequence, Set +from typing import Any, List, Optional, Sequence, Set from pydantic import BaseModel @@ -36,6 +36,7 @@ def generate_prompt( prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, + **kwargs: Any, ) -> LLMResult: """Take in a list of 
prompt values and return an LLMResult.""" @@ -45,26 +46,39 @@ async def agenerate_prompt( prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, + **kwargs: Any, ) -> LLMResult: """Take in a list of prompt values and return an LLMResult.""" @abstractmethod - def predict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str: + def predict( + self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any + ) -> str: """Predict text from text.""" @abstractmethod def predict_messages( - self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None + self, + messages: List[BaseMessage], + *, + stop: Optional[Sequence[str]] = None, + **kwargs: Any, ) -> BaseMessage: """Predict message from messages.""" @abstractmethod - async def apredict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str: + async def apredict( + self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any + ) -> str: """Predict text from text.""" @abstractmethod async def apredict_messages( - self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None + self, + messages: List[BaseMessage], + *, + stop: Optional[Sequence[str]] = None, + **kwargs: Any, ) -> BaseMessage: """Predict message from messages.""" diff --git a/langchain/chat_models/anthropic.py b/langchain/chat_models/anthropic.py index e913f6c4488b6..5f21cdeb3c908 100644 --- a/langchain/chat_models/anthropic.py +++ b/langchain/chat_models/anthropic.py @@ -94,9 +94,10 @@ def _generate( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> ChatResult: prompt = self._convert_messages_to_prompt(messages) - params: Dict[str, Any] = {"prompt": prompt, **self._default_params} + params: Dict[str, Any] = {"prompt": prompt, **self._default_params, **kwargs} if stop: params["stop_sequences"] = stop @@ -121,9 +122,10 @@ async def _agenerate( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> ChatResult: prompt = self._convert_messages_to_prompt(messages) - params: Dict[str, Any] = {"prompt": prompt, **self._default_params} + params: Dict[str, Any] = {"prompt": prompt, **self._default_params, **kwargs} if stop: params["stop_sequences"] = stop diff --git a/langchain/chat_models/base.py b/langchain/chat_models/base.py index dcb4ebebcfa55..05c1e8d55d279 100644 --- a/langchain/chat_models/base.py +++ b/langchain/chat_models/base.py @@ -64,6 +64,7 @@ def generate( messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Callbacks = None, + **kwargs: Any, ) -> LLMResult: """Top Level call""" @@ -82,7 +83,7 @@ def generate( ) try: results = [ - self._generate(m, stop=stop, run_manager=run_manager) + self._generate(m, stop=stop, run_manager=run_manager, **kwargs) if new_arg_supported else self._generate(m, stop=stop) for m in messages @@ -103,6 +104,7 @@ async def agenerate( messages: List[List[BaseMessage]], stop: Optional[List[str]] = None, callbacks: Callbacks = None, + **kwargs: Any, ) -> LLMResult: """Top Level call""" params = self.dict() @@ -121,7 +123,7 @@ async def agenerate( try: results = await asyncio.gather( *[ - self._agenerate(m, stop=stop, run_manager=run_manager) + self._agenerate(m, stop=stop, run_manager=run_manager, **kwargs) if new_arg_supported else self._agenerate(m, stop=stop) for m in messages @@ -143,18 +145,22 @@ def generate_prompt( prompts: 
List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, + **kwargs: Any, ) -> LLMResult: prompt_messages = [p.to_messages() for p in prompts] - return self.generate(prompt_messages, stop=stop, callbacks=callbacks) + return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs) async def agenerate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, + **kwargs: Any, ) -> LLMResult: prompt_messages = [p.to_messages() for p in prompts] - return await self.agenerate(prompt_messages, stop=stop, callbacks=callbacks) + return await self.agenerate( + prompt_messages, stop=stop, callbacks=callbacks, **kwargs + ) @abstractmethod def _generate( @@ -162,6 +168,7 @@ def _generate( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> ChatResult: """Top Level call""" @@ -171,6 +178,7 @@ async def _agenerate( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> ChatResult: """Top Level call""" @@ -193,18 +201,25 @@ async def _call_async( messages: List[BaseMessage], stop: Optional[List[str]] = None, callbacks: Callbacks = None, + **kwargs: Any, ) -> BaseMessage: - result = await self.agenerate([messages], stop=stop, callbacks=callbacks) + result = await self.agenerate( + [messages], stop=stop, callbacks=callbacks, **kwargs + ) generation = result.generations[0][0] if isinstance(generation, ChatGeneration): return generation.message else: raise ValueError("Unexpected generation type") - def call_as_llm(self, message: str, stop: Optional[List[str]] = None) -> str: - return self.predict(message, stop=stop) + def call_as_llm( + self, message: str, stop: Optional[List[str]] = None, **kwargs: Any + ) -> str: + return self.predict(message, stop=stop, **kwargs) - def predict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str: + def predict( + self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any + ) -> str: if stop is None: _stop = None else: @@ -213,30 +228,42 @@ def predict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str: return result.content def predict_messages( - self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None + self, + messages: List[BaseMessage], + *, + stop: Optional[Sequence[str]] = None, + **kwargs: Any, ) -> BaseMessage: if stop is None: _stop = None else: _stop = list(stop) - return self(messages, stop=_stop) + return self(messages, stop=_stop, **kwargs) - async def apredict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str: + async def apredict( + self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any + ) -> str: if stop is None: _stop = None else: _stop = list(stop) - result = await self._call_async([HumanMessage(content=text)], stop=_stop) + result = await self._call_async( + [HumanMessage(content=text)], stop=_stop, **kwargs + ) return result.content async def apredict_messages( - self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None + self, + messages: List[BaseMessage], + *, + stop: Optional[Sequence[str]] = None, + **kwargs: Any, ) -> BaseMessage: if stop is None: _stop = None else: _stop = list(stop) - return await self._call_async(messages, stop=_stop) + return await self._call_async(messages, stop=_stop, **kwargs) @property def _identifying_params(self) -> Mapping[str, Any]: @@ -261,8 +288,9 
@@ def _generate( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> ChatResult: - output_str = self._call(messages, stop=stop, run_manager=run_manager) + output_str = self._call(messages, stop=stop, run_manager=run_manager, **kwargs) message = AIMessage(content=output_str) generation = ChatGeneration(message=message) return ChatResult(generations=[generation]) @@ -273,6 +301,7 @@ def _call( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Simpler interface.""" @@ -281,6 +310,9 @@ async def _agenerate( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> ChatResult: - func = partial(self._generate, messages, stop=stop, run_manager=run_manager) + func = partial( + self._generate, messages, stop=stop, run_manager=run_manager, **kwargs + ) return await asyncio.get_event_loop().run_in_executor(None, func) diff --git a/langchain/chat_models/google_palm.py b/langchain/chat_models/google_palm.py index 0f305db8f4bef..74a1903de97ab 100644 --- a/langchain/chat_models/google_palm.py +++ b/langchain/chat_models/google_palm.py @@ -280,6 +280,7 @@ def _generate( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> ChatResult: prompt = _messages_to_prompt_dict(messages) @@ -291,6 +292,7 @@ def _generate( top_p=self.top_p, top_k=self.top_k, candidate_count=self.n, + **kwargs, ) return _response_to_result(response, stop) @@ -300,6 +302,7 @@ async def _agenerate( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> ChatResult: prompt = _messages_to_prompt_dict(messages) diff --git a/langchain/chat_models/openai.py b/langchain/chat_models/openai.py index 7aee780b8f559..d7c5183260331 100644 --- a/langchain/chat_models/openai.py +++ b/langchain/chat_models/openai.py @@ -302,8 +302,10 @@ def _generate( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> ChatResult: message_dicts, params = self._create_message_dicts(messages, stop) + params = {**params, **kwargs} if self.streaming: inner_completion = "" role = "assistant" @@ -348,8 +350,10 @@ async def _agenerate( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> ChatResult: message_dicts, params = self._create_message_dicts(messages, stop) + params = {**params, **kwargs} if self.streaming: inner_completion = "" role = "assistant" diff --git a/langchain/chat_models/promptlayer_openai.py b/langchain/chat_models/promptlayer_openai.py index 65865c1dc7f7a..ccb13b05b2bab 100644 --- a/langchain/chat_models/promptlayer_openai.py +++ b/langchain/chat_models/promptlayer_openai.py @@ -42,6 +42,7 @@ def _generate( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any ) -> ChatResult: """Call ChatOpenAI generate and then call PromptLayer API to log the request.""" from promptlayer.utils import get_api_key, promptlayer_api_request @@ -54,6 +55,7 @@ def _generate( response_dict, params = super()._create_message_dicts( [generation.message], stop ) + params = 
{**params, **kwargs} pl_request_id = promptlayer_api_request( "langchain.PromptLayerChatOpenAI", "langchain", @@ -79,6 +81,7 @@ async def _agenerate( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any ) -> ChatResult: """Call ChatOpenAI agenerate and then call PromptLayer to log.""" from promptlayer.utils import get_api_key, promptlayer_api_request_async @@ -91,6 +94,7 @@ async def _agenerate( response_dict, params = super()._create_message_dicts( [generation.message], stop ) + params = {**params, **kwargs} pl_request_id = await promptlayer_api_request_async( "langchain.PromptLayerChatOpenAI.async", "langchain", diff --git a/langchain/chat_models/vertexai.py b/langchain/chat_models/vertexai.py index 4f78b310818c4..bd2ecbb2faa46 100644 --- a/langchain/chat_models/vertexai.py +++ b/langchain/chat_models/vertexai.py @@ -1,6 +1,6 @@ """Wrapper around Google VertexAI chat-based models.""" from dataclasses import dataclass, field -from typing import Dict, List, Optional +from typing import Any, Dict, List, Optional from pydantic import root_validator @@ -93,6 +93,7 @@ def _generate( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> ChatResult: """Generate next turn in the conversation. @@ -119,7 +120,8 @@ def _generate( history = _parse_chat_history(messages[:-1]) context = history.system_message.content if history.system_message else None - chat = self.client.start_chat(context=context, **self._default_params) + params = {**self._default_params, **kwargs} + chat = self.client.start_chat(context=context, **params) for pair in history.history: chat._history.append((pair.question.content, pair.answer.content)) response = chat.send_message(question.content, **self._default_params) @@ -131,6 +133,7 @@ async def _agenerate( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> ChatResult: raise NotImplementedError( """Vertex AI doesn't support async requests at the moment.""" diff --git a/langchain/experimental/llms/jsonformer_decoder.py b/langchain/experimental/llms/jsonformer_decoder.py index f0305f3f92e87..98a57dda62758 100644 --- a/langchain/experimental/llms/jsonformer_decoder.py +++ b/langchain/experimental/llms/jsonformer_decoder.py @@ -2,7 +2,7 @@ from __future__ import annotations import json -from typing import TYPE_CHECKING, List, Optional, cast +from typing import TYPE_CHECKING, Any, List, Optional, cast from pydantic import Field, root_validator @@ -42,6 +42,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: jsonformer = import_jsonformer() from transformers import Text2TextGenerationPipeline diff --git a/langchain/experimental/llms/rellm_decoder.py b/langchain/experimental/llms/rellm_decoder.py index 8449b77555b0f..48a98fae01a7c 100644 --- a/langchain/experimental/llms/rellm_decoder.py +++ b/langchain/experimental/llms/rellm_decoder.py @@ -1,7 +1,7 @@ """Experimental implementation of RELLM wrapped LLM.""" from __future__ import annotations -from typing import TYPE_CHECKING, List, Optional, cast +from typing import TYPE_CHECKING, Any, List, Optional, cast from pydantic import Field, root_validator @@ -47,6 +47,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, 
+ **kwargs: Any, ) -> str: rellm = import_rellm() from transformers import Text2TextGenerationPipeline diff --git a/langchain/llms/ai21.py b/langchain/llms/ai21.py index 181adb0bc0c1e..ae02e38fe7971 100644 --- a/langchain/llms/ai21.py +++ b/langchain/llms/ai21.py @@ -112,6 +112,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call out to AI21's complete endpoint. @@ -140,10 +141,11 @@ def _call( base_url = "https://api.ai21.com/studio/v1/experimental" else: base_url = "https://api.ai21.com/studio/v1" + params = {**self._default_params, **kwargs} response = requests.post( url=f"{base_url}/{self.model}/complete", headers={"Authorization": f"Bearer {self.ai21_api_key}"}, - json={"prompt": prompt, "stopSequences": stop, **self._default_params}, + json={"prompt": prompt, "stopSequences": stop, **params}, ) if response.status_code != 200: optional_detail = response.json().get("error") diff --git a/langchain/llms/aleph_alpha.py b/langchain/llms/aleph_alpha.py index 384fd2650637f..2090badb837fa 100644 --- a/langchain/llms/aleph_alpha.py +++ b/langchain/llms/aleph_alpha.py @@ -206,6 +206,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call out to Aleph Alpha's completion endpoint. @@ -232,6 +233,7 @@ def _call( params["stop_sequences"] = self.stop_sequences else: params["stop_sequences"] = stop + params = {**params, **kwargs} request = CompletionRequest(prompt=Prompt.from_text(prompt), **params) response = self.client.complete(model=self.model, request=request) text = response.completions[0].completion diff --git a/langchain/llms/anthropic.py b/langchain/llms/anthropic.py index e9da0ae6b1209..83522b06c139f 100644 --- a/langchain/llms/anthropic.py +++ b/langchain/llms/anthropic.py @@ -162,6 +162,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: r"""Call out to Anthropic's completion endpoint. 
@@ -181,11 +182,12 @@ def _call( """ stop = self._get_anthropic_stop(stop) + params = {**self._default_params, **kwargs} if self.streaming: stream_resp = self.client.completion_stream( prompt=self._wrap_prompt(prompt), stop_sequences=stop, - **self._default_params, + **params, ) current_completion = "" for data in stream_resp: @@ -197,7 +199,7 @@ def _call( response = self.client.completion( prompt=self._wrap_prompt(prompt), stop_sequences=stop, - **self._default_params, + **params, ) return response["completion"] @@ -206,14 +208,16 @@ async def _acall( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call out to Anthropic's completion endpoint asynchronously.""" stop = self._get_anthropic_stop(stop) + params = {**self._default_params, **kwargs} if self.streaming: stream_resp = await self.client.acompletion_stream( prompt=self._wrap_prompt(prompt), stop_sequences=stop, - **self._default_params, + **params, ) current_completion = "" async for data in stream_resp: @@ -225,7 +229,7 @@ async def _acall( response = await self.client.acompletion( prompt=self._wrap_prompt(prompt), stop_sequences=stop, - **self._default_params, + **params, ) return response["completion"] diff --git a/langchain/llms/anyscale.py b/langchain/llms/anyscale.py index 0128b6515d391..8baa9225a490a 100644 --- a/langchain/llms/anyscale.py +++ b/langchain/llms/anyscale.py @@ -88,6 +88,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call out to Anyscale Service endpoint. Args: diff --git a/langchain/llms/aviary.py b/langchain/llms/aviary.py index 6f4a48a576de9..bd5a3ebd55fea 100644 --- a/langchain/llms/aviary.py +++ b/langchain/llms/aviary.py @@ -105,6 +105,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call out to Aviary Args: diff --git a/langchain/llms/bananadev.py b/langchain/llms/bananadev.py index d0d604539d8af..2fc2f06077b24 100644 --- a/langchain/llms/bananadev.py +++ b/langchain/llms/bananadev.py @@ -87,6 +87,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call to Banana endpoint.""" try: @@ -97,6 +98,7 @@ def _call( "Please install it with `pip install banana-dev`." 
) params = self.model_kwargs or {} + params = {**params, **kwargs} api_key = self.banana_api_key model_key = self.model_key model_inputs = { diff --git a/langchain/llms/base.py b/langchain/llms/base.py index 84ba2c5c86d86..866bdada53171 100644 --- a/langchain/llms/base.py +++ b/langchain/llms/base.py @@ -113,6 +113,7 @@ def _generate( prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompts.""" @@ -122,6 +123,7 @@ async def _agenerate( prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompts.""" @@ -130,24 +132,29 @@ def generate_prompt( prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, + **kwargs: Any, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] - return self.generate(prompt_strings, stop=stop, callbacks=callbacks) + return self.generate(prompt_strings, stop=stop, callbacks=callbacks, **kwargs) async def agenerate_prompt( self, prompts: List[PromptValue], stop: Optional[List[str]] = None, callbacks: Callbacks = None, + **kwargs: Any, ) -> LLMResult: prompt_strings = [p.to_string() for p in prompts] - return await self.agenerate(prompt_strings, stop=stop, callbacks=callbacks) + return await self.agenerate( + prompt_strings, stop=stop, callbacks=callbacks, **kwargs + ) def generate( self, prompts: List[str], stop: Optional[List[str]] = None, callbacks: Callbacks = None, + **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" # If string is passed in directly no errors will be raised but outputs will @@ -183,9 +190,11 @@ def generate( ) try: output = ( - self._generate(prompts, stop=stop, run_manager=run_manager) + self._generate( + prompts, stop=stop, run_manager=run_manager, **kwargs + ) if new_arg_supported - else self._generate(prompts, stop=stop) + else self._generate(prompts, stop=stop, **kwargs) ) except (KeyboardInterrupt, Exception) as e: run_manager.on_llm_error(e) @@ -202,9 +211,11 @@ def generate( ) try: new_results = ( - self._generate(missing_prompts, stop=stop, run_manager=run_manager) + self._generate( + missing_prompts, stop=stop, run_manager=run_manager, **kwargs + ) if new_arg_supported - else self._generate(missing_prompts, stop=stop) + else self._generate(missing_prompts, stop=stop, **kwargs) ) except (KeyboardInterrupt, Exception) as e: run_manager.on_llm_error(e) @@ -227,6 +238,7 @@ async def agenerate( prompts: List[str], stop: Optional[List[str]] = None, callbacks: Callbacks = None, + **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" params = self.dict() @@ -255,9 +267,11 @@ async def agenerate( ) try: output = ( - await self._agenerate(prompts, stop=stop, run_manager=run_manager) + await self._agenerate( + prompts, stop=stop, run_manager=run_manager, **kwargs + ) if new_arg_supported - else await self._agenerate(prompts, stop=stop) + else await self._agenerate(prompts, stop=stop, **kwargs) ) except (KeyboardInterrupt, Exception) as e: await run_manager.on_llm_error(e, verbose=self.verbose) @@ -275,10 +289,10 @@ async def agenerate( try: new_results = ( await self._agenerate( - missing_prompts, stop=stop, run_manager=run_manager + missing_prompts, stop=stop, run_manager=run_manager, **kwargs ) if new_arg_supported - else await self._agenerate(missing_prompts, stop=stop) + else await 
self._agenerate(missing_prompts, stop=stop, **kwargs) ) except (KeyboardInterrupt, Exception) as e: await run_manager.on_llm_error(e) @@ -297,7 +311,11 @@ async def agenerate( return LLMResult(generations=generations, llm_output=llm_output, run=run_info) def __call__( - self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None + self, + prompt: str, + stop: Optional[List[str]] = None, + callbacks: Callbacks = None, + **kwargs: Any, ) -> str: """Check Cache and run the LLM on the given prompt and input.""" if not isinstance(prompt, str): @@ -307,52 +325,70 @@ def __call__( "`generate` instead." ) return ( - self.generate([prompt], stop=stop, callbacks=callbacks) + self.generate([prompt], stop=stop, callbacks=callbacks, **kwargs) .generations[0][0] .text ) async def _call_async( - self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None + self, + prompt: str, + stop: Optional[List[str]] = None, + callbacks: Callbacks = None, + **kwargs: Any, ) -> str: """Check Cache and run the LLM on the given prompt and input.""" - result = await self.agenerate([prompt], stop=stop, callbacks=callbacks) + result = await self.agenerate( + [prompt], stop=stop, callbacks=callbacks, **kwargs + ) return result.generations[0][0].text - def predict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str: + def predict( + self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any + ) -> str: if stop is None: _stop = None else: _stop = list(stop) - return self(text, stop=_stop) + return self(text, stop=_stop, **kwargs) def predict_messages( - self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None + self, + messages: List[BaseMessage], + *, + stop: Optional[Sequence[str]] = None, + **kwargs: Any, ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) - content = self(text, stop=_stop) + content = self(text, stop=_stop, **kwargs) return AIMessage(content=content) - async def apredict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str: + async def apredict( + self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any + ) -> str: if stop is None: _stop = None else: _stop = list(stop) - return await self._call_async(text, stop=_stop) + return await self._call_async(text, stop=_stop, **kwargs) async def apredict_messages( - self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None + self, + messages: List[BaseMessage], + *, + stop: Optional[Sequence[str]] = None, + **kwargs: Any, ) -> BaseMessage: text = get_buffer_string(messages) if stop is None: _stop = None else: _stop = list(stop) - content = await self._call_async(text, stop=_stop) + content = await self._call_async(text, stop=_stop, **kwargs) return AIMessage(content=content) @property @@ -422,6 +458,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Run the LLM on the given prompt and input.""" @@ -430,6 +467,7 @@ async def _acall( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Run the LLM on the given prompt and input.""" raise NotImplementedError("Async generation not implemented for this LLM.") @@ -439,6 +477,7 @@ def _generate( prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> LLMResult: """Run the LLM on the 
given prompt and input.""" # TODO: add caching here. @@ -446,9 +485,9 @@ def _generate( new_arg_supported = inspect.signature(self._call).parameters.get("run_manager") for prompt in prompts: text = ( - self._call(prompt, stop=stop, run_manager=run_manager) + self._call(prompt, stop=stop, run_manager=run_manager, **kwargs) if new_arg_supported - else self._call(prompt, stop=stop) + else self._call(prompt, stop=stop, **kwargs) ) generations.append([Generation(text=text)]) return LLMResult(generations=generations) @@ -458,15 +497,16 @@ async def _agenerate( prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> LLMResult: """Run the LLM on the given prompt and input.""" generations = [] new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager") for prompt in prompts: text = ( - await self._acall(prompt, stop=stop, run_manager=run_manager) + await self._acall(prompt, stop=stop, run_manager=run_manager, **kwargs) if new_arg_supported - else await self._acall(prompt, stop=stop) + else await self._acall(prompt, stop=stop, **kwargs) ) generations.append([Generation(text=text)]) return LLMResult(generations=generations) diff --git a/langchain/llms/baseten.py b/langchain/llms/baseten.py index 5637fc41570b2..a5a314c4373cb 100644 --- a/langchain/llms/baseten.py +++ b/langchain/llms/baseten.py @@ -54,6 +54,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call to Baseten deployed model endpoint.""" try: diff --git a/langchain/llms/beam.py b/langchain/llms/beam.py index d7d3f27ca5a0a..d29461af7823d 100644 --- a/langchain/llms/beam.py +++ b/langchain/llms/beam.py @@ -251,10 +251,12 @@ def _call( prompt: str, stop: Optional[list] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call to Beam.""" url = "https://apps.beam.cloud/" + self.app_id if self.app_id else self.url payload = {"prompt": prompt, "max_length": self.max_length} + payload.update(kwargs) headers = { "Accept": "*/*", "Accept-Encoding": "gzip, deflate", diff --git a/langchain/llms/bedrock.py b/langchain/llms/bedrock.py index b87f8483f7212..884202a5a43d2 100644 --- a/langchain/llms/bedrock.py +++ b/langchain/llms/bedrock.py @@ -155,6 +155,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call out to Bedrock service model. 
@@ -173,10 +174,8 @@ def _call( _model_kwargs = self.model_kwargs or {} provider = self.model_id.split(".")[0] - - input_body = LLMInputOutputAdapter.prepare_input( - provider, prompt, _model_kwargs - ) + params = {**_model_kwargs, **kwargs} + input_body = LLMInputOutputAdapter.prepare_input(provider, prompt, params) body = json.dumps(input_body) accept = "application/json" contentType = "application/json" diff --git a/langchain/llms/cerebriumai.py b/langchain/llms/cerebriumai.py index dac1f48a1727a..4e0d159ca7fad 100644 --- a/langchain/llms/cerebriumai.py +++ b/langchain/llms/cerebriumai.py @@ -88,6 +88,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call to CerebriumAI endpoint.""" try: @@ -100,7 +101,9 @@ def _call( params = self.model_kwargs or {} response = model_api_request( - self.endpoint_url, {"prompt": prompt, **params}, self.cerebriumai_api_key + self.endpoint_url, + {"prompt": prompt, **params, **kwargs}, + self.cerebriumai_api_key, ) text = response["data"]["result"] if stop is not None: diff --git a/langchain/llms/cohere.py b/langchain/llms/cohere.py index 08043720273f3..7fd181fe3f724 100644 --- a/langchain/llms/cohere.py +++ b/langchain/llms/cohere.py @@ -145,6 +145,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call out to Cohere's generate endpoint. @@ -167,7 +168,7 @@ def _call( params["stop_sequences"] = self.stop else: params["stop_sequences"] = stop - + params = {**params, **kwargs} response = completion_with_retry( self, model=self.model, prompt=prompt, **params ) diff --git a/langchain/llms/ctransformers.py b/langchain/llms/ctransformers.py index 617d56dc229d1..52223ece672b5 100644 --- a/langchain/llms/ctransformers.py +++ b/langchain/llms/ctransformers.py @@ -81,6 +81,7 @@ def _call( prompt: str, stop: Optional[Sequence[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Generate text from a prompt. diff --git a/langchain/llms/databricks.py b/langchain/llms/databricks.py index b0e0007c77d1f..6fa2fd44e1f72 100644 --- a/langchain/llms/databricks.py +++ b/langchain/llms/databricks.py @@ -303,12 +303,14 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Queries the LLM endpoint with the given prompt and stop sequence.""" # TODO: support callbacks request = {"prompt": prompt, "stop": stop} + request.update(kwargs) if self.model_kwargs: request.update(self.model_kwargs) diff --git a/langchain/llms/deepinfra.py b/langchain/llms/deepinfra.py index 6e18f2e23bb4b..0cf5768e4ba84 100644 --- a/langchain/llms/deepinfra.py +++ b/langchain/llms/deepinfra.py @@ -66,6 +66,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call out to DeepInfra's inference API endpoint. 
@@ -82,6 +83,7 @@ def _call( response = di("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} + _model_kwargs = {**_model_kwargs, **kwargs} # HTTP headers for authorization headers = { "Authorization": f"bearer {self.deepinfra_api_token}", diff --git a/langchain/llms/fake.py b/langchain/llms/fake.py index 5700e82f2d9f7..3d61a951bb7e7 100644 --- a/langchain/llms/fake.py +++ b/langchain/llms/fake.py @@ -24,6 +24,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Return next response""" response = self.responses[self.i] @@ -35,6 +36,7 @@ async def _acall( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Return next response""" response = self.responses[self.i] diff --git a/langchain/llms/forefrontai.py b/langchain/llms/forefrontai.py index 8c49918abd606..16a0ac48d43a5 100644 --- a/langchain/llms/forefrontai.py +++ b/langchain/llms/forefrontai.py @@ -87,6 +87,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call out to ForefrontAI's complete endpoint. @@ -108,7 +109,7 @@ def _call( "Authorization": f"Bearer {self.forefrontai_api_key}", "Content-Type": "application/json", }, - json={"text": prompt, **self._default_params}, + json={"text": prompt, **self._default_params, **kwargs}, ) response_json = response.json() text = response_json["result"][0]["completion"] diff --git a/langchain/llms/google_palm.py b/langchain/llms/google_palm.py index 530cc2e9fdc67..cc5a91880edbb 100644 --- a/langchain/llms/google_palm.py +++ b/langchain/llms/google_palm.py @@ -134,6 +134,7 @@ def _generate( prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> LLMResult: generations = [] for prompt in prompts: @@ -147,6 +148,7 @@ def _generate( top_k=self.top_k, max_output_tokens=self.max_output_tokens, candidate_count=self.n, + **kwargs, ) prompt_generations = [] @@ -163,6 +165,7 @@ async def _agenerate( prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> LLMResult: raise NotImplementedError() diff --git a/langchain/llms/gooseai.py b/langchain/llms/gooseai.py index 0271d039e7c61..73476e04860ac 100644 --- a/langchain/llms/gooseai.py +++ b/langchain/llms/gooseai.py @@ -137,6 +137,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call the GooseAI API.""" params = self._default_params @@ -145,6 +146,8 @@ def _call( raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop + params = {**params, **kwargs} + response = self.client.create(engine=self.model_name, prompt=prompt, **params) text = response.choices[0].text return text diff --git a/langchain/llms/gpt4all.py b/langchain/llms/gpt4all.py index f52a0915c40bb..b9d37ad38cdf7 100644 --- a/langchain/llms/gpt4all.py +++ b/langchain/llms/gpt4all.py @@ -183,6 +183,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: r"""Call out to GPT4All's generate method. 
@@ -203,7 +204,8 @@ def _call( if run_manager: text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose) text = "" - for token in self.client.generate(prompt, **self._default_params()): + params = {**self._default_params(), **kwargs} + for token in self.client.generate(prompt, **params): if text_callback: text_callback(token) text += token diff --git a/langchain/llms/huggingface_endpoint.py b/langchain/llms/huggingface_endpoint.py index 03b0467be73f4..ff83b0baeaa3e 100644 --- a/langchain/llms/huggingface_endpoint.py +++ b/langchain/llms/huggingface_endpoint.py @@ -96,6 +96,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call out to HuggingFace Hub's inference endpoint. @@ -114,7 +115,8 @@ def _call( _model_kwargs = self.model_kwargs or {} # payload samples - parameter_payload = {"inputs": prompt, "parameters": _model_kwargs} + params = {**_model_kwargs, **kwargs} + parameter_payload = {"inputs": prompt, "parameters": params} # HTTP headers for authorization headers = { diff --git a/langchain/llms/huggingface_hub.py b/langchain/llms/huggingface_hub.py index 5cd7e2420f64d..cefa1bcc193b9 100644 --- a/langchain/llms/huggingface_hub.py +++ b/langchain/llms/huggingface_hub.py @@ -91,6 +91,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call out to HuggingFace Hub's inference endpoint. @@ -107,7 +108,8 @@ def _call( response = hf("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} - response = self.client(inputs=prompt, params=_model_kwargs) + params = {**_model_kwargs, **kwargs} + response = self.client(inputs=prompt, params=params) if "error" in response: raise ValueError(f"Error raised by inference API: {response['error']}") if self.client.task == "text-generation": diff --git a/langchain/llms/huggingface_pipeline.py b/langchain/llms/huggingface_pipeline.py index 615dcd8e3f020..f10f93354f17d 100644 --- a/langchain/llms/huggingface_pipeline.py +++ b/langchain/llms/huggingface_pipeline.py @@ -164,6 +164,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: response = self.pipeline(prompt) if self.pipeline.task == "text-generation": diff --git a/langchain/llms/huggingface_text_gen_inference.py b/langchain/llms/huggingface_text_gen_inference.py index d121b3b9de765..3d17c73418d63 100644 --- a/langchain/llms/huggingface_text_gen_inference.py +++ b/langchain/llms/huggingface_text_gen_inference.py @@ -113,6 +113,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: if stop is None: stop = self.stop_sequences @@ -130,6 +131,7 @@ def _call( temperature=self.temperature, repetition_penalty=self.repetition_penalty, seed=self.seed, + **kwargs, ) # remove stop sequences from the end of the generated text for stop_seq in stop: diff --git a/langchain/llms/human.py b/langchain/llms/human.py index d0ceefdbc15bf..d585ee8edcf4f 100644 --- a/langchain/llms/human.py +++ b/langchain/llms/human.py @@ -60,6 +60,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """ Displays the prompt to the user and returns their input as a response. 
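The provider diffs above and below all apply one pattern: each `_call`/`_generate` gains `**kwargs: Any`, and the call-time kwargs are merged over the instance-level `model_kwargs` so per-invocation settings win. A minimal sketch of that merge rule (illustrative only; `build_params` is a hypothetical helper name, not code from this patch):

```python
from typing import Any, Dict, Optional

def build_params(model_kwargs: Optional[Dict[str, Any]], **kwargs: Any) -> Dict[str, Any]:
    # Mirrors `params = {**params, **kwargs}` from the diffs: in a dict merge
    # the later entries win, so a per-call value overrides the configured one.
    params = model_kwargs or {}
    return {**params, **kwargs}

print(build_params({"temperature": 0.7, "max_tokens": 32}, temperature=0.2))
# {'temperature': 0.2, 'max_tokens': 32}
```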
diff --git a/langchain/llms/llamacpp.py b/langchain/llms/llamacpp.py index ff10f41808ccd..a28233b67bdbe 100644 --- a/langchain/llms/llamacpp.py +++ b/langchain/llms/llamacpp.py @@ -200,6 +200,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call the Llama model and return the output. @@ -227,6 +228,7 @@ def _call( return combined_text_output else: params = self._get_parameters(stop) + params = {**params, **kwargs} result = self.client(prompt=prompt, **params) return result["choices"][0]["text"] diff --git a/langchain/llms/manifest.py b/langchain/llms/manifest.py index 0cef977e34806..cd04c149ffa93 100644 --- a/langchain/llms/manifest.py +++ b/langchain/llms/manifest.py @@ -48,13 +48,15 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call out to LLM through Manifest.""" if stop is not None and len(stop) != 1: raise NotImplementedError( f"Manifest currently only supports a single stop token, got {stop}" ) - kwargs = self.llm_kwargs or {} + params = self.llm_kwargs or {} + params = {**params, **kwargs} if stop is not None: - kwargs["stop_token"] = stop - return self.client.run(prompt, **kwargs) + params["stop_token"] = stop + return self.client.run(prompt, **params) diff --git a/langchain/llms/modal.py b/langchain/llms/modal.py index 338a6b42f59e4..a6cbd601a4ad1 100644 --- a/langchain/llms/modal.py +++ b/langchain/llms/modal.py @@ -76,9 +76,11 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call to Modal endpoint.""" params = self.model_kwargs or {} + params = {**params, **kwargs} response = requests.post( url=self.endpoint_url, headers={ diff --git a/langchain/llms/mosaicml.py b/langchain/llms/mosaicml.py index 0a8b8561e2c38..b225a1aeb5800 100644 --- a/langchain/llms/mosaicml.py +++ b/langchain/llms/mosaicml.py @@ -102,6 +102,7 @@ def _call( stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, is_retry: bool = False, + **kwargs: Any, ) -> str: """Call out to a MosaicML LLM inference endpoint. @@ -123,6 +124,7 @@ def _call( payload = {"input_strings": [prompt]} payload.update(_model_kwargs) + payload.update(kwargs) # HTTP headers for authorization headers = { diff --git a/langchain/llms/nlpcloud.py b/langchain/llms/nlpcloud.py index d901e6b76d85d..aa2a62df1ab41 100644 --- a/langchain/llms/nlpcloud.py +++ b/langchain/llms/nlpcloud.py @@ -117,6 +117,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call out to NLPCloud's create endpoint. @@ -141,7 +142,6 @@ def _call( end_sequence = stop[0] else: end_sequence = None - response = self.client.generation( - prompt, end_sequence=end_sequence, **self._default_params - ) + params = {**self._default_params, **kwargs} + response = self.client.generation(prompt, end_sequence=end_sequence, **params) return response["generated_text"] diff --git a/langchain/llms/openai.py b/langchain/llms/openai.py index ad494971d22a2..bb1c021283a47 100644 --- a/langchain/llms/openai.py +++ b/langchain/llms/openai.py @@ -273,6 +273,7 @@ def _generate( prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> LLMResult: """Call out to OpenAI's endpoint with k unique prompts. 
@@ -290,6 +291,7 @@ def _generate( """ # TODO: write a unit test for this params = self._invocation_params + params = {**params, **kwargs} sub_prompts = self.get_sub_prompts(params, prompts, stop) choices = [] token_usage: Dict[str, int] = {} @@ -326,9 +328,11 @@ async def _agenerate( prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> LLMResult: """Call out to OpenAI's endpoint async with k unique prompts.""" params = self._invocation_params + params = {**params, **kwargs} sub_prompts = self.get_sub_prompts(params, prompts, stop) choices = [] token_usage: Dict[str, int] = {} @@ -771,8 +775,10 @@ def _generate( prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> LLMResult: messages, params = self._get_chat_params(prompts, stop) + params = {**params, **kwargs} if self.streaming: response = "" params["stream"] = True @@ -804,8 +810,10 @@ async def _agenerate( prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> LLMResult: messages, params = self._get_chat_params(prompts, stop) + params = {**params, **kwargs} if self.streaming: response = "" params["stream"] = True diff --git a/langchain/llms/petals.py b/langchain/llms/petals.py index f407bcf280233..bf547ab6311cf 100644 --- a/langchain/llms/petals.py +++ b/langchain/llms/petals.py @@ -137,9 +137,11 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call the Petals API.""" params = self._default_params + params = {**params, **kwargs} inputs = self.tokenizer(prompt, return_tensors="pt")["input_ids"] outputs = self.client.generate(inputs, **params) text = self.tokenizer.decode(outputs[0]) diff --git a/langchain/llms/pipelineai.py b/langchain/llms/pipelineai.py index 677504056c125..1e0e7f8bdec32 100644 --- a/langchain/llms/pipelineai.py +++ b/langchain/llms/pipelineai.py @@ -87,6 +87,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call to Pipeline Cloud endpoint.""" try: @@ -98,6 +99,7 @@ def _call( ) client = PipelineCloud(token=self.pipeline_api_key) params = self.pipeline_kwargs or {} + params = {**params, **kwargs} run = client.run_pipeline(self.pipeline_key, [prompt, params]) try: diff --git a/langchain/llms/predictionguard.py b/langchain/llms/predictionguard.py index ee2a9d4b78ded..b024a30fda15e 100644 --- a/langchain/llms/predictionguard.py +++ b/langchain/llms/predictionguard.py @@ -91,6 +91,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call out to Prediction Guard's model API. 
Args: @@ -117,6 +118,7 @@ def _call( output=self.output, temperature=params["temperature"], max_tokens=params["max_tokens"], + **kwargs, ) text = response["choices"][0]["text"] diff --git a/langchain/llms/promptlayer_openai.py b/langchain/llms/promptlayer_openai.py index 6454ed804fc4d..93176f2382ba6 100644 --- a/langchain/llms/promptlayer_openai.py +++ b/langchain/llms/promptlayer_openai.py @@ -1,6 +1,6 @@ """PromptLayer wrapper.""" import datetime -from typing import List, Optional +from typing import Any, List, Optional from langchain.callbacks.manager import ( AsyncCallbackManagerForLLMRun, @@ -42,6 +42,7 @@ def _generate( prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> LLMResult: """Call OpenAI generate and then call PromptLayer API to log the request.""" from promptlayer.utils import get_api_key, promptlayer_api_request @@ -56,11 +57,12 @@ def _generate( "text": generation.text, "llm_output": generated_responses.llm_output, } + params = {**self._identifying_params, **kwargs} pl_request_id = promptlayer_api_request( "langchain.PromptLayerOpenAI", "langchain", [prompt], - self._identifying_params, + params, self.pl_tags, resp, request_start_time, @@ -81,6 +83,7 @@ async def _agenerate( prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> LLMResult: from promptlayer.utils import get_api_key, promptlayer_api_request_async @@ -94,11 +97,12 @@ async def _agenerate( "text": generation.text, "llm_output": generated_responses.llm_output, } + params = {**self._identifying_params, **kwargs} pl_request_id = await promptlayer_api_request_async( "langchain.PromptLayerOpenAI.async", "langchain", [prompt], - self._identifying_params, + params, self.pl_tags, resp, request_start_time, @@ -147,6 +151,7 @@ def _generate( prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> LLMResult: """Call OpenAI generate and then call PromptLayer API to log the request.""" from promptlayer.utils import get_api_key, promptlayer_api_request @@ -161,11 +166,12 @@ def _generate( "text": generation.text, "llm_output": generated_responses.llm_output, } + params = {**self._identifying_params, **kwargs} pl_request_id = promptlayer_api_request( "langchain.PromptLayerOpenAIChat", "langchain", [prompt], - self._identifying_params, + params, self.pl_tags, resp, request_start_time, @@ -186,6 +192,7 @@ async def _agenerate( prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> LLMResult: from promptlayer.utils import get_api_key, promptlayer_api_request_async @@ -199,11 +206,12 @@ async def _agenerate( "text": generation.text, "llm_output": generated_responses.llm_output, } + params = {**self._identifying_params, **kwargs} pl_request_id = await promptlayer_api_request_async( "langchain.PromptLayerOpenAIChat.async", "langchain", [prompt], - self._identifying_params, + params, self.pl_tags, resp, request_start_time, diff --git a/langchain/llms/replicate.py b/langchain/llms/replicate.py index 10c727bdd534b..f4660f6b3712a 100644 --- a/langchain/llms/replicate.py +++ b/langchain/llms/replicate.py @@ -85,6 +85,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call to replicate endpoint.""" try: @@ -110,6 +111,6 @@ 
def _call( first_input_name = input_properties[0][0] inputs = {first_input_name: prompt, **self.input} - iterator = replicate_python.run(self.model, input={**inputs}) + iterator = replicate_python.run(self.model, input={**inputs, **kwargs}) return "".join([output for output in iterator]) diff --git a/langchain/llms/rwkv.py b/langchain/llms/rwkv.py index b2643d905835c..af8703cb19851 100644 --- a/langchain/llms/rwkv.py +++ b/langchain/llms/rwkv.py @@ -210,6 +210,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: r"""RWKV generation diff --git a/langchain/llms/sagemaker_endpoint.py b/langchain/llms/sagemaker_endpoint.py index f793aae141ec4..0c262a3ccf1bd 100644 --- a/langchain/llms/sagemaker_endpoint.py +++ b/langchain/llms/sagemaker_endpoint.py @@ -207,6 +207,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call out to Sagemaker inference endpoint. @@ -223,6 +224,7 @@ def _call( response = se("Tell me a joke.") """ _model_kwargs = self.model_kwargs or {} + _model_kwargs = {**_model_kwargs, **kwargs} _endpoint_kwargs = self.endpoint_kwargs or {} body = self.content_handler.transform_input(prompt, _model_kwargs) diff --git a/langchain/llms/self_hosted.py b/langchain/llms/self_hosted.py index 7d36643b982dd..0eaf8ec0a857f 100644 --- a/langchain/llms/self_hosted.py +++ b/langchain/llms/self_hosted.py @@ -214,5 +214,8 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: - return self.client(pipeline=self.pipeline_ref, prompt=prompt, stop=stop) + return self.client( + pipeline=self.pipeline_ref, prompt=prompt, stop=stop, **kwargs + ) diff --git a/langchain/llms/self_hosted_hugging_face.py b/langchain/llms/self_hosted_hugging_face.py index 1ef685a5dea66..e88d3fb9350a4 100644 --- a/langchain/llms/self_hosted_hugging_face.py +++ b/langchain/llms/self_hosted_hugging_face.py @@ -207,5 +207,8 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: - return self.client(pipeline=self.pipeline_ref, prompt=prompt, stop=stop) + return self.client( + pipeline=self.pipeline_ref, prompt=prompt, stop=stop, **kwargs + ) diff --git a/langchain/llms/stochasticai.py b/langchain/llms/stochasticai.py index 5d2fe7300ec4f..14bc0b7039889 100644 --- a/langchain/llms/stochasticai.py +++ b/langchain/llms/stochasticai.py @@ -86,6 +86,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call out to StochasticAI's complete endpoint. 
@@ -102,6 +103,7 @@ def _call( response = StochasticAI("Tell me a joke.") """ params = self.model_kwargs or {} + params = {**params, **kwargs} response_post = requests.post( url=self.api_url, json={"prompt": prompt, "params": params}, diff --git a/langchain/llms/vertexai.py b/langchain/llms/vertexai.py index 16266b49dd5c2..522c8cd524f8b 100644 --- a/langchain/llms/vertexai.py +++ b/langchain/llms/vertexai.py @@ -50,8 +50,11 @@ def _default_params(self) -> Dict[str, Any]: } return {**base_params} - def _predict(self, prompt: str, stop: Optional[List[str]] = None) -> str: - res = self.client.predict(prompt, **self._default_params) + def _predict( + self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any + ) -> str: + params = {**self._default_params, **kwargs} + res = self.client.predict(prompt, **params) return self._enforce_stop_words(res.text, stop) def _enforce_stop_words(self, text: str, stop: Optional[List[str]] = None) -> str: @@ -100,6 +103,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call Vertex model to get predictions based on the prompt. @@ -111,4 +115,4 @@ def _call( Returns: The string generated by the model. """ - return self._predict(prompt, stop) + return self._predict(prompt, stop, **kwargs) diff --git a/langchain/llms/writer.py b/langchain/llms/writer.py index d704205d65b7a..1767d8b432b14 100644 --- a/langchain/llms/writer.py +++ b/langchain/llms/writer.py @@ -118,6 +118,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Call out to Writer's completions endpoint. @@ -141,7 +142,7 @@ def _call( f"/organization/{self.writer_org_id}" f"/model/{self.model_id}/completions" ) - + params = {**self._default_params, **kwargs} response = requests.post( url=base_url, headers={ @@ -149,7 +150,7 @@ def _call( "Content-Type": "application/json", "Accept": "application/json", }, - json={"prompt": prompt, **self._default_params}, + json={"prompt": prompt, **params}, ) text = response.text if stop is not None: diff --git a/tests/unit_tests/agents/test_agent.py b/tests/unit_tests/agents/test_agent.py index 3a03f03f15b50..ac89aa4a768c1 100644 --- a/tests/unit_tests/agents/test_agent.py +++ b/tests/unit_tests/agents/test_agent.py @@ -20,6 +20,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Increment counter, and then return response in that index.""" self.i += 1 diff --git a/tests/unit_tests/agents/test_react.py b/tests/unit_tests/agents/test_react.py index 8f2a3ff2fc50d..ca3ffa2517c86 100644 --- a/tests/unit_tests/agents/test_react.py +++ b/tests/unit_tests/agents/test_react.py @@ -38,6 +38,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Increment counter, and then return response in that index.""" self.i += 1 diff --git a/tests/unit_tests/chains/test_hyde.py b/tests/unit_tests/chains/test_hyde.py index dd2ade83c1825..e189c84e898a3 100644 --- a/tests/unit_tests/chains/test_hyde.py +++ b/tests/unit_tests/chains/test_hyde.py @@ -1,5 +1,5 @@ """Test HyDE.""" -from typing import List, Optional +from typing import Any, List, Optional import numpy as np @@ -36,6 +36,7 @@ def _generate( prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = 
None, + **kwargs: Any, ) -> LLMResult: return LLMResult(generations=[[Generation(text="foo") for _ in range(self.n)]]) @@ -44,6 +45,7 @@ async def _agenerate( prompts: List[str], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> LLMResult: return LLMResult(generations=[[Generation(text="foo") for _ in range(self.n)]]) diff --git a/tests/unit_tests/chains/test_natbot.py b/tests/unit_tests/chains/test_natbot.py index 77c29808433a2..e5f68ab500628 100644 --- a/tests/unit_tests/chains/test_natbot.py +++ b/tests/unit_tests/chains/test_natbot.py @@ -15,6 +15,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: """Return `foo` if longer than 10000 words, else `bar`.""" if len(prompt) > 10000: diff --git a/tests/unit_tests/llms/fake_chat_model.py b/tests/unit_tests/llms/fake_chat_model.py index c8705d1c7fc9e..f68a7532d28ad 100644 --- a/tests/unit_tests/llms/fake_chat_model.py +++ b/tests/unit_tests/llms/fake_chat_model.py @@ -17,6 +17,7 @@ def _call( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: return "fake response" @@ -25,6 +26,7 @@ async def _agenerate( messages: List[BaseMessage], stop: Optional[List[str]] = None, run_manager: Optional[AsyncCallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> ChatResult: output_str = "fake response" message = AIMessage(content=output_str) diff --git a/tests/unit_tests/llms/fake_llm.py b/tests/unit_tests/llms/fake_llm.py index 8815cc0b82809..71c2f0b3657da 100644 --- a/tests/unit_tests/llms/fake_llm.py +++ b/tests/unit_tests/llms/fake_llm.py @@ -34,6 +34,7 @@ def _call( prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, ) -> str: if self.sequential_responses: return self._get_next_response_in_sequence From 20e9ce8a62a0747dcfb6494eddf5eb10760b9982 Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Sun, 11 Jun 2023 10:14:57 -0700 Subject: [PATCH 23/46] bump version to 197 (#6007) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 40fc00be451ed..d8fbca994fadb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "langchain" -version = "0.0.196" +version = "0.0.197" description = "Building applications with LLMs through composability" authors = [] license = "MIT" From c868a3eef3368ecae547ad8637cf8e163326e773 Mon Sep 17 00:00:00 2001 From: Ikko Eltociear Ashimine Date: Mon, 12 Jun 2023 05:13:33 +0900 Subject: [PATCH 24/46] Update databricks.md (#6006) HuggingFace -> Hugging Face #### Before submitting #### Who can review? --- docs/integrations/databricks.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/integrations/databricks.md b/docs/integrations/databricks.md index 0a81ce6ab0096..f85523e4c85d6 100644 --- a/docs/integrations/databricks.md +++ b/docs/integrations/databricks.md @@ -8,7 +8,7 @@ Databricks embraces the LangChain ecosystem in various ways: 1. Databricks connector for the SQLDatabase Chain: SQLDatabase.from_databricks() provides an easy way to query your data on Databricks through LangChain 2. Databricks-managed MLflow integrates with LangChain: Tracking and serving LangChain applications with fewer steps 3. 
Databricks as an LLM provider: Deploy your fine-tuned LLMs on Databricks via serving endpoints or cluster driver proxy apps, and query it as langchain.llms.Databricks -4. Databricks Dolly: Databricks open-sourced Dolly which allows for commercial use, and can be accessed through the HuggingFace Hub +4. Databricks Dolly: Databricks open-sourced Dolly which allows for commercial use, and can be accessed through the Hugging Face Hub Databricks connector for the SQLDatabase Chain ---------------------------------------------- @@ -28,9 +28,9 @@ Databricks as an LLM provider The notebook [Wrap Databricks endpoints as LLMs](../modules/models/llms/integrations/databricks.html) illustrates the method to wrap Databricks endpoints as LLMs in LangChain. It supports two types of endpoints: the serving endpoint, which is recommended for both production and development, and the cluster driver proxy app, which is recommended for interactive development. -Databricks endpoints support Dolly, but are also great for hosting models like MPT-7B or any other models from the HuggingFace ecosystem. Databricks endpoints can also be used with proprietary models like OpenAI to provide a governance layer for enterprises. +Databricks endpoints support Dolly, but are also great for hosting models like MPT-7B or any other models from the Hugging Face ecosystem. Databricks endpoints can also be used with proprietary models like OpenAI to provide a governance layer for enterprises. Databricks Dolly ---------------- -Databricks’ Dolly is an instruction-following large language model trained on the Databricks machine learning platform that is licensed for commercial use. The model is available on Hugging Face Hub as databricks/dolly-v2-12b. See the notebook [HuggingFace Hub](../modules/models/llms/integrations/huggingface_hub.html) for instructions to access it through the HuggingFace Hub integration with LangChain. +Databricks’ Dolly is an instruction-following large language model trained on the Databricks machine learning platform that is licensed for commercial use. The model is available on Hugging Face Hub as databricks/dolly-v2-12b. See the notebook [Hugging Face Hub](../modules/models/llms/integrations/huggingface_hub.html) for instructions to access it through the Hugging Face Hub integration with LangChain. From 6e90406e0f87595ca4d858dbb1e48ef0abaaf854 Mon Sep 17 00:00:00 2001 From: "Jiaping(JP) Zhang" Date: Sun, 11 Jun 2023 13:13:57 -0700 Subject: [PATCH 25/46] [APIChain] enhance the robustness of url (#6008) While using the APIChain, it sometimes failed during the intermediate step of generating the api url and calling the `request` function. After some digging, I found that the url sometimes includes a space at the beginning, like `%20https://...api.com`, which causes the internal `self.requests_wrapper.get` call to fail. Adding a little string preprocessing with `.strip` to remove the space improves the robustness of the APIChain, making sure it can send the request and retrieve the API result more reliably.
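To make the failure mode and the one-line fix concrete, here is a minimal sketch (the endpoint is only a stand-in; none of this is code from the patch itself):

```python
import requests

# A URL as the LLM might emit it, with a stray leading space.
generated_url = " https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41"

# The normalization this PR adds before the request is issued.
api_url = generated_url.strip()
response = requests.get(api_url, timeout=10)
print(response.status_code)
```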
Fixes # (issue) #### Before submitting #### Who can review? Tag maintainers/contributors who might be interested: @vowelparrot --- langchain/chains/api/base.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/langchain/chains/api/base.py b/langchain/chains/api/base.py index e5af03a0097fb..7e199fe42faf4 100644 --- a/langchain/chains/api/base.py +++ b/langchain/chains/api/base.py @@ -78,6 +78,7 @@ def _call( callbacks=_run_manager.get_child(), ) _run_manager.on_text(api_url, color="green", end="\n", verbose=self.verbose) + api_url = api_url.strip() api_response = self.requests_wrapper.get(api_url) _run_manager.on_text( api_response, color="yellow", end="\n", verbose=self.verbose @@ -106,6 +107,7 @@ async def _acall( await _run_manager.on_text( api_url, color="green", end="\n", verbose=self.verbose ) + api_url = api_url.strip() api_response = await self.requests_wrapper.aget(api_url) await _run_manager.on_text( api_response, color="yellow", end="\n", verbose=self.verbose From d7d629911b90154887bdc6c45711a67d01f9fae3 Mon Sep 17 00:00:00 2001 From: Akhil Vempali Date: Mon, 12 Jun 2023 01:50:03 +0530 Subject: [PATCH 26/46] feat: :sparkles: Added filtering option to FAISS vectorstore (#5966) Inspired by the filtering capability available in ChromaDB, I added the same functionality to the FAISS vectorstore as well. Since FAISS does not have an inbuilt method of filtering, I used the approach suggested in this [thread](https://github.com/facebookresearch/faiss/issues/1079) Langchain Issue inspiration: https://github.com/hwchase17/langchain/issues/4572 - [x] Added filtering capability to semantic similarity search and MMR - [x] Added test cases for filtering in `tests/integration_tests/vectorstores/test_faiss.py` #### Who can review? Tag maintainers/contributors who might be interested: VectorStores / Retrievers / Memory - @dev2049 - @hwchase17 --- .../indexes/vectorstores/examples/faiss.ipynb | 205 ++++++++++++++---- .../examples/faiss_index/index.faiss | Bin 0 -> 258093 bytes langchain/vectorstores/faiss.py | 112 ++++++++-- .../vectorstores/test_faiss.py | 22 ++ 4 files changed, 281 insertions(+), 58 deletions(-) create mode 100644 docs/modules/indexes/vectorstores/examples/faiss_index/index.faiss diff --git a/docs/modules/indexes/vectorstores/examples/faiss.ipynb b/docs/modules/indexes/vectorstores/examples/faiss.ipynb index d967068e13c62..c932ea4e1530d 100644 --- a/docs/modules/indexes/vectorstores/examples/faiss.ipynb +++ b/docs/modules/indexes/vectorstores/examples/faiss.ipynb @@ -40,20 +40,12 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 2, "id": "47f9b495-88f1-4286-8d5d-1416103931a7", "metadata": { "tags": [] }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "OpenAI API Key: ········\n" - ] - } - ], + "outputs": [], "source": [ "import os\n", "import getpass\n", @@ -66,7 +58,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "id": "aac9563e", "metadata": { "tags": [] @@ -81,7 +73,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 10, "id": "a3c3999a", "metadata": { "tags": [] @@ -99,7 +91,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 11, "id": "5eabdb75", "metadata": { "tags": [] @@ -114,7 +106,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 12, "id": "4b172de8", "metadata": { "tags": [] @@ -150,7 +142,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 13, "id": "186ee1d8", "metadata": {}, "outputs": [], @@ -160,18 +152,18 @@ }, { "cell_type": "code", -
"execution_count": 7, + "execution_count": 14, "id": "284e04b5", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "(Document(page_content='In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \\n\\nWe cannot let this happen. \\n\\nTonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0),\n", - " 0.3914415)" + "(Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../../state_of_the_union.txt'}),\n", + " 0.36913747)" ] }, - "execution_count": 7, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } @@ -191,7 +183,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 15, "id": "b558ebb7", "metadata": {}, "outputs": [], @@ -212,7 +204,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 16, "id": "428a6816", "metadata": {}, "outputs": [], @@ -222,7 +214,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 17, "id": "56d1841c", "metadata": {}, "outputs": [], @@ -232,7 +224,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 18, "id": "39055525", "metadata": {}, "outputs": [], @@ -242,17 +234,17 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 19, "id": "98378c4e", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "Document(page_content='In state after state, new laws have been passed, not only to suppress the vote, but to subvert entire elections. \\n\\nWe cannot let this happen. \\n\\nTonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. 
\\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', lookup_str='', metadata={'source': '../../state_of_the_union.txt'}, lookup_index=0)" + "Document(page_content='Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \\n\\nTonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \\n\\nOne of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \\n\\nAnd I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.', metadata={'source': '../../../state_of_the_union.txt'})" ] }, - "execution_count": 13, + "execution_count": 19, "metadata": {}, "output_type": "execute_result" } @@ -273,7 +265,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 20, "id": "6dfd2b78", "metadata": {}, "outputs": [], @@ -284,17 +276,17 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 21, "id": "29960da7", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "{'e0b74348-6c93-4893-8764-943139ec1d17': Document(page_content='foo', lookup_str='', metadata={}, lookup_index=0)}" + "{'068c473b-d420-487a-806b-fb0ccea7f711': Document(page_content='foo', metadata={})}" ] }, - "execution_count": 8, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } @@ -305,17 +297,17 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 22, "id": "83392605", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "{'bdc50ae3-a1bb-4678-9260-1b0979578f40': Document(page_content='bar', lookup_str='', metadata={}, lookup_index=0)}" + "{'807e0c63-13f6-4070-9774-5c6f0fbb9866': Document(page_content='bar', metadata={})}" ] }, - "execution_count": 9, + "execution_count": 22, "metadata": {}, "output_type": "execute_result" } @@ -326,7 +318,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 23, "id": "a3fcc1c7", "metadata": {}, "outputs": [], @@ -336,18 +328,18 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 24, "id": "41c51f89", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "{'e0b74348-6c93-4893-8764-943139ec1d17': Document(page_content='foo', lookup_str='', metadata={}, lookup_index=0),\n", - " 'd5211050-c777-493d-8825-4800e74cfdb6': Document(page_content='bar', lookup_str='', metadata={}, lookup_index=0)}" + "{'068c473b-d420-487a-806b-fb0ccea7f711': Document(page_content='foo', metadata={}),\n", + " '807e0c63-13f6-4070-9774-5c6f0fbb9866': Document(page_content='bar', metadata={})}" ] }, - "execution_count": 11, 
+ "execution_count": 24, "metadata": {}, "output_type": "execute_result" } @@ -356,13 +348,140 @@ "db1.docstore._dict" ] }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "f4294b96", + "metadata": {}, + "source": [ + "## Similarity Search with filtering\n", + "FAISS vectorstore can also support filtering, since the FAISS does not natively support filtering we have to do it manually. This is done by first fetching more results than `k` and then filtering them. You can filter the documents based on metadata. You can also set the `fetch_k` parameter when calling any search method to set how many documents you want to fetch before filtering. Here is a small example:" + ] + }, { "cell_type": "code", - "execution_count": null, - "id": "f80b60de", + "execution_count": 25, + "id": "d5bf812c", "metadata": {}, - "outputs": [], - "source": [] + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Content: foo, Metadata: {'page': 1}, Score: 5.159960813797904e-15\n", + "Content: foo, Metadata: {'page': 2}, Score: 5.159960813797904e-15\n", + "Content: foo, Metadata: {'page': 3}, Score: 5.159960813797904e-15\n", + "Content: foo, Metadata: {'page': 4}, Score: 5.159960813797904e-15\n" + ] + } + ], + "source": [ + "from langchain.schema import Document\n", + "list_of_documents = [\n", + " Document(page_content=\"foo\", metadata=dict(page=1)),\n", + " Document(page_content=\"bar\", metadata=dict(page=1)),\n", + " Document(page_content=\"foo\", metadata=dict(page=2)),\n", + " Document(page_content=\"barbar\", metadata=dict(page=2)),\n", + " Document(page_content=\"foo\", metadata=dict(page=3)),\n", + " Document(page_content=\"bar burr\", metadata=dict(page=3)),\n", + " Document(page_content=\"foo\", metadata=dict(page=4)),\n", + " Document(page_content=\"bar bruh\", metadata=dict(page=4))\n", + "]\n", + "db = FAISS.from_documents(list_of_documents, embeddings)\n", + "results_with_scores = db.similarity_search_with_score(\"foo\")\n", + "for doc, score in results_with_scores:\n", + " print(f\"Content: {doc.page_content}, Metadata: {doc.metadata}, Score: {score}\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "3d33c126", + "metadata": {}, + "source": [ + "Now we make the same query call but we filter for only `page = 1` " + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "id": "83159330", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Content: foo, Metadata: {'page': 1}, Score: 5.159960813797904e-15\n", + "Content: bar, Metadata: {'page': 1}, Score: 0.3131446838378906\n" + ] + } + ], + "source": [ + "results_with_scores = db.similarity_search_with_score(\"foo\", filter=dict(page=1))\n", + "for doc, score in results_with_scores:\n", + " print(f\"Content: {doc.page_content}, Metadata: {doc.metadata}, Score: {score}\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "0be136e0", + "metadata": {}, + "source": [ + "Same thing can be done with the `max_marginal_relevance_search` as well." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 27, + "id": "432c6980", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Content: foo, Metadata: {'page': 1}\n", + "Content: bar, Metadata: {'page': 1}\n" + ] + } + ], + "source": [ + "results = db.max_marginal_relevance_search(\"foo\", filter=dict(page=1))\n", + "for doc in results:\n", + " print(f\"Content: {doc.page_content}, Metadata: {doc.metadata}\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "1b4ecd86", + "metadata": {}, + "source": [ + "Here is an example of how to set `fetch_k` parameter when calling `similarity_search`. Usually you would want the `fetch_k` parameter >> `k` parameter. This is because the `fetch_k` parameter is the number of documents that will be fetched before filtering. If you set `fetch_k` to a low number, you might not get enough documents to filter from." + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "id": "1fd60fd1", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Content: foo, Metadata: {'page': 1}, Score: 5.159960813797904e-15\n", + "Content: bar, Metadata: {'page': 1}, Score: 0.3131446838378906\n" + ] + } + ], + "source": [ + "results = db.similarity_search(\"foo\", filter=dict(page=1), k=1, fetch_k=4)\n", + "for doc, score in results_with_scores:\n", + " print(f\"Content: {doc.page_content}, Metadata: {doc.metadata}, Score: {score}\")" + ] } ], "metadata": { @@ -381,7 +500,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.6" + "version": "3.9.16" } }, "nbformat": 4, diff --git a/docs/modules/indexes/vectorstores/examples/faiss_index/index.faiss b/docs/modules/indexes/vectorstores/examples/faiss_index/index.faiss new file mode 100644 index 0000000000000000000000000000000000000000..92aab3fe39c91c5a57ef23ccd8067dab938d7d2e GIT binary patch literal 258093 [base85 binary payload omitted]
z1Cu|V_-E-oe%Z$atL=)J{>USET0cR`@qfj4?hay;l6T0xY+K8QA)(6M=XaDD*)PD} z=o$a9Y5=~UcvZ>s@JEeK^qoT&V|kv{N<_m$_`C3jqNA4xxuFfQTg3rx_W2B$wcW{& zc6NY{I)6B=4K#eM$xf{6C+n}b!yPAsK}$ALTc{w+6i+k-r*1Ow$k zxWlakM~^bY#b3U%L}?_R2`ZDS5TCu$VZ`!$oj6P`1-gjhUsE(;zVt;lqL zt|J>A-0yx5^v9M%g>yf)pxFwX?%Pnd{B@8|{%;Pn zb*|;ENe}qo6*pl|{A{+q{V-@5-{In@6ZHF)s^7BDk7C$vqiFVQZb$IA9t6y63p;x& zH>+XjXwYf-giku-q`dBFhHYNwV)1^7YX1_B@9_pVgkQ(qtDK;Czru$MwBs*7+RB-G z2C+LgwXh~X8zOV>VmxSqvTi+Q^?iV{ln0(ZF`N(XmDS-Qy$|&Dv$WrGmHcx5KmO=& zCuQ0O`rg=tQ849N5&PPhlb(TQ% ztaXj!h?N|QFP9D-X6;vl8oa1j0Kbw35VE-iw~ON#m5%DXszQjT)8 zx-;|pT`rA$Qff~<%G%Cv51*gjSJKbbvZ%27nDnp{hN`dlXxCC08uSR?46=d02Hrq> zh$q^Oz+vM;k#tBcrVqP#XSV%&72rsHUdYb4#7302{?C)6};{Fn=Ul^V*sV0BYCyXT4-6A4s=|}edh$IDm;ru4?aNQw!>q# zM6q6vW`S=@FX&|(#gZP@?F~B?vkXq+5plk(c%!F$>b?P7j&z`XEap_}++)dj7!*gp z3vT}=C@`#qQ5$TrO=UA#|Jz6Q?V<@}L~g+D?u9Job2U;;!b_EYA4aq<)TXH@b^V6@ zmb;KHrL*W2S~As@GU`M#aCl;fbWGN(q$`lFVogjpc;ayrS!V?wUV6-RWDN`p8Rsy| zBLIAzGx?$2cktq0cP6>+V>T=NX@8dRhyZVBescs2{2S&lU|lBU&U?v2)j2FXuq~T9 z>lCd2*asrbv*F|6`LOkZjodOmUP^}{xOdQMNSS8G=RV#KRIA))wj~bl(wBYh;H;9q z(eKna$)*oY0msH!(D~z9APpz2AH@8=AAy1vRv^5KbOFYX*8y>i^=b#m>eUWi|20AK z0I-TV#m0{h2Bqi>t<7J>)2A);PujzD7Vl-G>)7s5JGIl0)2v%X8=k%Mz9jUy-;fn} zXJJbgK0#ZZy6+N@CP=nz?{cAM8w+hgqrE?;edk_fFQIy75AxP9W|5hS z#jl@xAo&NV8K;@>-O&Hz=*;74+M+n#Kv9v3qEdtqg^=#q>x7IU^DHvYGa-tiC>3Q$ zC_*JEim1+BCnAcX2pKbvnUg%@Tf0Br=Xt%{d(Pf#{eIuyy62qxLZ2;4+9geD9)fjz zHj011Y2Rw$^S}=`$E+5YNbe5enG=_I@Zxfye^`spoxw$`1Y2osMVn3U(8)&6?Ve*I z_)pDb+bl zvO?#IH(PTIt$u?|x;;Q*2G%zG7F*D5H|ou(f|O=axY^ZEb;^r@)p1Ke^yJ@w6t>tW zkn~|66pyO|on3D*zYF&<#Iren8gmB6v`M!LuQKiw`gkwf7S?!2-Nzj%C$L_N;;e07jktQZX2 zUu=Od>pQ`kIdt>j-M6)ROnuKc8+E~RKddm}zbTN>HWR3y82LUU?c?G+`&m3>q8{=` zm9b6jPI0eGqcE?5lUlcF6ZB6!`bHx>cOe+@i6CkiTL|M?~pW&QSC6=L-wp|vtp18(^8TIB)xttj zpVwEL1;>-;b;5}ooFL-J{@NZfj>*EZSw~^R-w1yFI=Yfh;DQFTT4%U|y@~=YH(Rc4OO0}tZ0S}rp*4Ka+(Vo^N8N1l4s;(rl z*lFf*NGvQuT5C?)qRGxZK&+&#=DFqZHcevL-5GQj^J6nk?1k=OQF!6uKAcyTEW8f4 zzwO1xYwBa`!Ef+Tlc}!n%xpP%HulB@ynba98a^?VKe=vY@7s030FU?lsqtdm7;MY5 zqe0X;Qr{E58ZzQMTo9!L&noUqH*Iy~eflb8hwDPCG}H(D_J-xK{^n^>t75+u+7}FS zxsAfBIz`>e zW?weV^bL#ufy_hz#Uwa26Yk zG?gh+$iyB%yamKqvVn;``Sd?X?fMYnOBB@FU#$?sVEolU!KuUr&UBBc0YvzbubutD z$#?j)cW&}JzcX|n=Z9KWq;&`4Kq#%)&4}TFd`(Sgp~bFWr;i?QrNOLzt3>{QAtx^5 z_vBF`S1IJZc=vraZ#ick7`W%~vVv>CSM{O1;|DIkc1xGe?`6cN+-onX$&eR{Pi7YN z{A==@aeR3F^>}Ph0+6<`p;i&F_{(xmxr2q@IsxMKL|&jP##o~YFQB}lw9LPw$r#m; zE<|XK=f;F0)eO?_a`I1=G(+?-?X$mN3iWn9h5r{m5|<}T0g)l=CwRNn41OiD9#o#~ z0#EX{YpBLR)Q5xFY<^%e=j3fUg5##;&Xl%f`XOlq^BfYd2p`pVY^&yfNyTR_#u#f| z0^LHRdlY z;c}o1%rZJI0%AMC_#phhr15l?F}?sv7eM<$G*G_7EGr|*lAZDPhOe+E&qD3vorOo~ zvz~2DtmHRq*F&7rj*B&2>S!+$pEEi~{`2n$;em2-hZIH4>IA!bo+I6!Ay3iPN4uRD z+0jRnVdWDO?mz1TpS*nwPY(FZJKsskA-+=|l=i^+7j}cMOC#B?OLO@+-B;M|KTA0V>D#t?oa!EH zKCTgaF3lYt4b-c(b<`zskEjW@=6@UFH1lSRvL%e{mjRkB zPiWnuD5r12?&CMHw8Tm3EsZCS8oNbv($o&ieF6lF5?dq*-9p#ioOgck9Cj{frjEU_ z4vdXIgWP>5UbPy5rh{8at!{P7eZ~smwyrjQ_qcBN0z12oK=MCm zQocxV6}G$i9}>?pIyU0E8WgNg`~fu|f54!*E#(pa!g6TeG{Kj{(EMgM_|wy^)>~YA zGogFLoZraH*5S?tdxciQu17II{l~@Jr)KqeARVop&m?s>1>$(B-LJ6NN1K&hXv|5U z;EqQU*uMy&_d0}Dzwauf@u+uxDsk8}BnCv$x0F@IT!Q8sn}D(e^{EpRT6%q138!qI z({D*5_MxV^n6oI99l82E6vDOY%ft)nl>4*bJVbj+oQ= z0E=I=i@#a>mj%zWL7Vkmz(0H&J83ZqZJv}Xuge#J)8!kSdY7(^UIfB}$cx#T`v>vz z_RR|Ui1<0~IP?-|4@p}_%8oTJc%M5VC3^OFBvVe89)4D2><%|1bh5*k}PIi%S&BpwiN(P z`Fn1)&l#y6bE!wwqL)Fe-A!MbJIn|2MOo-8F((w4`QWXnKp@Xm%H~hSM=!4^^nRii z!RE<0Fnn2r4X<1!uB+gbHGpPWEYGbsu}c^yMwKWRDr4GrhPcq(xH|3=2zJ^X{DM>6 zBk3vi+C>;S=pFC>Y9La!sb!%vbU%##6sguj#qkVUbl>&o&DG*q)wyZ9!rMXoH)Sv| zv-Ns>|o9*#D^xW$^W)7(o>0YI?#Mh75pG-_voWX+?ZX3`2|B6 
zv5M-{>Ku}Xv+QXZKzdy}pZqrGHK#QYT3XAYmv^0$#Oz0R!%C`)N>1|>@~$$GA@F(f zHZc=q7c}*Nyc~Nq+aZxJk^W_o4@RNzUvu3;)O)WWX`({ekhppo5M#iJfs4i50hBs@ zfmn^7J!YiZb&0E8>x=2;Z4x&BVEHpb2>4*p-f3Soy6u;m?O;+<+@FG z6F=+Y`fd}ExLz%aC>3)_R$9>nU4AuDDdUS-C~S5OgN@IOWMUt+)65QPN!Ci-rxS#* zz+3XTIsl40IKWVPP9XIil1HGJ$LZEXCT6;{r&y7BmW%VyYArL;HK+*j5<0@>&wmQ} zcD)qQtAf4U!%P(N0lF_?I8v6wc|V@gd~YRX+%Qb};G$5@Bkj2?HK+&qW!E8hGum6KQSn*M39JK#79ZrSrS1(T1O2$q+~ zvoyVIia4<*J9w)~aXB)LvWPRIKI1e`A%95bIT2y}N0WUxs*myVD`hPNGv|(*h|csNyxt*XDrw#+_YwI!eqF_*AFE=sQbW_1?V< zT0gSFz)>rBhaVqZ$5;L2o{sPM{Ug`7PZt^cCKb5a-f;zKo=l(J`txDKTH?iNeNmU5 zC%-Ud9}cfuU#6d_hM~cDXTxb+{5AyJ(3<_2xDZXwrSh)(Ucz|YL>&7320RZsg=2yT z;54b7XLdI1@z9pP>~~X(yr1A1M{V43>;ron@q_OkeVipP zw?x-*ov|vdh`+t6%gl#G<7K0baCG|(?%~n}>F;Rhyzp0D2Nsyt9r`Z@xyfZ?n#J~n z^`>_4;t_i-4IjfjY8*w%hKcFA7M+?W?GAUY&xx#G={D(_}(EIT^q#U zGO3O{)#eBG4_g84ZjFY}>Ur>Rfjj#;!d@;I@6X41%CL0hSNz)hH|)`rb5FUk+Ozl) zgx1}Kuh&%~9yaF>>H4^_U-m$GcPnVRtq!(0HV*VR+F|sgE|}9=!J~Kz>O>S{yIZ&5 z)X4$*w1tiK6z`yw(RW-eM2J^n>~YIx4}+$XTE{Cuik{0;+@rzC1m$M;0 zx%Uma>78MFG~?Kr&pvqetUhl#?Fhuas$ota^4aCbEzarE^DkSET?0Sq0ZcuDOvIo0 znL(}qQ+D!@h0pPadgx`zl2_haqzp&CiP(Lde1yTpFUMo_u{0nm5dsTJo7d>x9F4)<~*PcbRIO zQ@!EJWEW^hk9A!3=ZBP-(uTkP7{{Mwm|^d{{n+DBAuF%cK{_7R@akBqWgQsbc{8Tk zGM?N{2j(8h!tqsWS)b3|?D**gJY-nMoTxk3@w96PbTshee=M@HpIyJoPJcATJ8hPU zf2%b&X7RZm)p+UJPRRYs+1sVPv#AexinJZ94u9j5AKCM!+k)Urwhu(_+zVf-1JL1n z7sH96U6^fl2dJXsdmeoft~b3*UMZTK&Aq!dG0@^bf0IkGuAen?=q+ z>H+lAYC?UW0-n33T)lITs{?NVtuLE#<&5%j$Wgvw@c`cF;SBz?Up`VDL0OiYYOA{s zPR$C3#C^ZSd%@3z|M6ZM9r5QvUry%)e^)$2s%h9bqd7d;{*b@8*g|cQqs>2?x&rkn z2+bScS&yaZ)Pb+-w`k62wZIF`TIy}HnY_z_LonTJD0Xq{B~!0qd?`IMCNv}a{N>J& z|L!W*UAmhc%r#dIt$KC^Jo0Xf-Kbg&k8pkHiS9nKjOCFGS>0L z9nfz#l~Wxs`@~2T>z45!GTKWt{kRm>fuk`j(x3M6Ei~3z58b+&V`2e4mv%@jpB?4E zZ|&Q{C+}>C_G?bE^f(Jv?B1R&k3OdCY$DV1PMr8}9tDO&*1}CESMJ<0lyoW(w{{y3 z`zL>awXX5}-hg)M$ak4cw@!Z;dfAe++qdH`>>b2%ZFH@k*L|YRU`9}%y zi@>*6H(@=)_Pp|BCuSI07bfHwai4X!p{08W7UkH@&FsVMzvs)Tj(WgZUq4Jt zNrc=R6Jd3qLGXRxV>G*wL*G3>&#n$GRCYRlgS=r`{8QN#?%Aj-zIjcbGX-kH@#svf zp}A)KfH;2bz;$A$aCBd-i?6$k0=aw%xHf1i$r0lacP!*arr}a=^4nF7H^Z(27p3?Q z_cd$m%{1Phy2{k!xbMX->Tz$_!oi$a*%r#-%r%%MsFUQLBZvKMGS$jyU%n{FdvXx)2Q;qquLGPQw zhQd%5H?n|RclgEb{V|4NcISm>(yaKc)L5<$+vd`}eRHEk-=eH!^M7O8K%#$NIBdKF zhu>|h@)^lc-E2R&&zXSSWdM8WTMu^jwuddfw1|E&2M-70~eN9cJRwShZ$}*hk9%sD5EqZWz}@+u*dUcIw>CTIv&X zEw{u1F9`BUkx0j-6+@zc>Ye{r70XhGSgVr`xRPccpnHgR@Tbe$z&vApHn_tU=$NY| z)1H9)E(u6a&@E0I2Y1lN?83(Kw;5fT@NFAUXPJ5cikBG3w0*R>%_m)V*{5C@1eREvsui=ZaEopaOC10I~wD-g_Eo>TbNGFcf9<&9Ek<^iznmAQ?e!ECUvTI9;dqDtvB7{OO|g} z?r%27G5ma^E49Bzdc*N>G`n=oOfV06I?m^U zIi?+5uW1(fnw@bB#E)7X)IQDga1`BtFh3>&$KSaKlZu)_!Z#H!m#jk7s*3;DXf!Tp zxeHo0X^Rbdm!i&XZHa!)h?y`lKaaOKR?ZAQ-@=W}>az4cV|<3>KN{u`Uf?&cn1Ulu=7_AjzR@*mOP zIPQg{^*}z$Gm0l+q-SA{R`gD6w-v!`^=hLws1V=J`Dn59yL6^twM|oZ3hp<3EqPwo#YnScw#Rc=BI;gJ|*M2 zL5?_krUzVGwg_`ZSg9|Ew9O_jR|HR-4YkLLlyvcX@agOhMs+~m*NKzoA~89NbD}tN_F)j67Z*UR^=o_ms?tU4=#>^(2e=l#7#=m7~z}BWH$jkxh2- zwLsoO{i!eWW{p5-ZGCAGK5Z8v-a}ru>k=pCVJmO%sr5J2Y6U0XWAhiJ(z*A7DYG3J zMJ4v?b{ZF+P<6i}wB#=^>{Bpryr0(TtI#FnF4`>{LjD?7 z+rQLj3VEyA^S>lU{m5=@eMB{J04Aqj5*Yw?x4p_t|L)T)nz|8>#KcGqTQ|eb|88){ zsDt1Z7sy zriV)VDS7~3Pw6I8{czH0cH8Vl&eruyS+A4Zz^Y^nkOxWM$A`kbKij2uqqg#Vi;jZx zS?6b#Kv|fNE}6;%Ba;Wf<;U?%u(es{M#^{l;cU_)=Jk{Afqb8%WSOQxT`G5hU~{cTo2l|mco-+%uRivnXqAhGQW|S zL0-ERJDwOT|B$;g%KI|u7i;$J7%P~2lSavJ`a9}WM0PsiPXMlh>;U6pvAr#Q^2Wl#DUKkNG&>9|pN zr|q3=Bz=H>I*W3Mmnp|oqd2!yvy9lom{#z6TRJQ9p32193Vygg+J%>&sA0Wace6RA zO@;2b`gBpjVaZ%hHILVdI#K4T&2j9XQ3Djme{owPryRpX?mZH+n6lPyiOyZ_Sg)B% z8J}I`dh!(OCMs!%;6BuP(jgb>(DQ>R1B)z1*$-&HCDG4QCvD;nF9jjh7-~+B0fXje 
z;cnDzsFed&XAt>`54GN>otjp`+voa=~`8w68fP38&7$kYgYH)dmK z-NvMcn<%$bW7px=u++K(o@rhV8|yy+@*RFVcD~4_tipd7Bc8_jZbg*A|A8j@fZ$V% zYVn%4PFF-xxRjlW3W~BZ6O!IM!Jc)((a6$CaNyr8TfLC|1-fY;QObsMhdxSQDi6&}_E}!uo?OFy`hK%Kv6?;DIMN&UAw% zl?x={W%Rmeb?N}bEm==%6AM!`#f-8$#9WvxW-Zb;mysa&>r0`nEciocAF(yA`7Z-M zCC%kaDl64_&)Se~?S!Lm&BVGg@7-UZPjh#q*7;PtJ2H84@{5PGK zdKdw5BTHy$i?n{V{B}Lig8$g!36y!Uu^b8wTkqw95y(d%{!b9G>|B~#X>rmeB+f$8 z9ez$ufgxLjN6x04ItnA&J5f&ThTU4d2l5W;uZ?2X0d%ajGq)EFG;pErK@2@Uf_g2S zZ<}k4q9=$8k(isbW4_3SEU9a1ttLEjn+B1d13~Y<{YWzlm9h%lzA~6awV0zmmn=cd zb>^B+>GikK_D zcv6nUH%!bX?3NXi<{Ha+eocY=lvq{9#C_|4`jnII*XBvuQ%<=DwdpgsTU#e9#LGzf z#{_Sl(TW!t0I2p=((~H6!LA>51vjFY5BSu12&LA?k@$->+3O_gKzKLMnzHkaJYe|U zqez(*X|4i~zR_p)hgY%{^9IuEwxC(V=GwiCPA{#U0TN^IwR!eHxc~;Hr6}aZ{BzH} zjB*$Xy&C;^D@*%&5BHnh!=J-0pqOobbTr~(U1;8<3dVXq?gsJyJ3kt+JFh|A`hpT3?OBD6!R|1s5yz{dX&8tF}LA$MXM!Ro7|=Qa9$u;=yN*j2{s~m) z*_6?w-_uHRXtqm)86q$?;~h#7zM+R)3X8I^sAkR zJ)YWB%=H-MN}&DMh#6&w-C&>`Nt)e>>ZlSfx7VmOfBVZqr|I{(-GMZk#a$@@d+*vf}FclwcaREK^ zKT`kS9q>)Zt=vPap1g*hK^*z#3M_R0iBeh=X4}Q%yu=Fr_fHvb8Thnn#u`u~Kbnw^(2A+#y!h^Gt`19R&`>=f45ZCyu0E~I|h@GB3 zl_jilkW0-QVh4}L;8pP(dv|yVy18cJ{nfy}$Dp^Y17)|f=Ll_Wy6`FG5qt3>3Y&NQ50A9?f;H_2 zfbYaw+gK zyNO?GF2=IhdIN!K1Kaml&9d!Uu#iRfFpGP`FUKRSx4eESjV$ovLzPWGQ{eNud4ZHXBbV zwrJU92cysP2icjcZ9g{#!y|3g#L=yCVe?X0?Kkd?rrlO*8RETB_6O zeK5$xSbiVf2n_0v6nX*0pIbq{=s?IfYy`AlGS!^oQ>LX_tZT$V=(!RVzSTc@lQkse47`Y0kEoci9H=C((QPB__eh6HOE+W+j44qHcrf$rl zYa@bs{M*wW;)kC{gP}ppztd%)W1xGuui)!Uch-<{@OkRopGa}>fCwy&O!2z+@q3d%XJ{d8pxXcE3vzKHCnwi$Ji;_66qId%SQhE z!X{YQVjo%0FpYsI!)A;`wfgiqN9|HWG|=Y|LMjZB=~fS}vRN zF`85hJNd5BF=+d^0r`jr%lh|=Q?Kzws}ix{^cyTTzXQzyHSnoxDig;MJ0+0aHEJ&h z4Xg{FE9$9+M~*5*{RZUpzaIzGuS);Q1f;d%LhmXoPqV2#{9tlFSsqdL7aR`s0NQ_` zUWcN6?_gQ1G17Xoy{;Cd%{{=fX{toECw=PM6{!#LNm>i2*$_bILHGLQ?ga8P9>DJ- zX*K)WcqSY3W*Vj+aZ%~@fqV#zeY`&3>>BHVr1#J;$Q#Rt z_oJS+M(4x{nC@*U4_d9k=bAI%-18*fI^7dNzcUv)~6vICieq zcghpz!t-r+!N-39c)HBM%w9Lqcjz`GpN9VTn`82m&S2U8GzdLA|FR7v{Cx%=AAMz{ z$q>KG3*9rjs-$szUe8P>^s-=7J=JsV2D%RCibTf%$^9QNo2dzCc`6WIdmkjP@@2#@ zm^~ZC-ibXz@=uw1klW3ePaatbE`7pCA45r}j$rtkSLmDRD)cso^oH&Jd;kOfeOG8L z_@b6N(ED{EuhGvz@&v4kJB5K=4`aimMc~%X7AqTEz)!0OD)PbM>d0Iw|R2+ zTTd-x7`(bL2-{`QwPCkF@02*I00p0HXfc8tAFGSkjWs`9+pK?76 ziED^eN0H{6;K-Nz;rKUyE;M%Ny4~~~*ah4s;k@u@CFa>=w7M9AE`1Eu?7bs^*odCO zTBVRC5gR{|$iJB0w!W~edkH5EWotUT!;3oyGXMLB*`as4;giV$?z6BL(9a?CWjv$W zQ~i5gW1nXCW-qFn3eN%ZMP-E7H9Y4vjx{V?Cv*(xJ>hlzSE!p74uYdhwKhp~jQB37 z3WYyXjY}VCMr~QD}~qcMSmv~6L^E~oy$P`g8xP}5c?)SS!+lh>>}zOQ~iga z^YXq}@O}U%E|=(B*n_`wG{i&F17$Ev*wO_o)rSJS}aGHzE5hC?fC>NmcG4t^Yk=9l&^f9M8ZHr?PE^$km z%==zChg6SZO~?bfgTc@`a&ONnu(7U3yksuZF(7dwob>!7Je|%X0&#+g>iM<5G|wqb z93N#f7s^v*aJMj=PkC%AI0$Uc6kte3HI5JV7yJqlZt)8B4g4C`1*Us6Q^y$U5p#EB z{=d^Ds!!^9dw3(80P!cSttnJZh(_v5QGY$${4a?86uk;TE;h>i{f!i2V@_EAJson{)&PL zEaDDuIyNx$Hl$osDC&+q9DN9e(zU_jxE8i}&xj9^I1Q&YSy+i#+N%P>VEsCFQfnoNA9pZEh;Z9LN)_s+79M zV$0w)VjjSVby(=BFuK;Di2fN26Rvgw(r@gy;t?~v)E(oe4M6ANF3i%a5a@M*cvmAh z!*l#HR@+}&jtXsNP7cK&a#=#lEs~fiSe$5sLZ>T_T0n%gDORr3QT6qV=-GSexC>W+ z;Gg*6y%^~>BVNT@OB$f~`^Ufi;l#j_*JxPKGEmlEnO)nr8VUJ6k zq{(WK=o?O~i7%>SMD2q6k&YstU}g3J;p=$O;3p>?Rr^=AmdUqOyZ5dN)swt3{UHh- z`1LbFB4*7c7L`9;)5iEsW5_;60C6dQ-p)zn=^Xo3DMGK2>)r*@94K$zS#?>}LZzDI zRiy=#VLVmpHBn1kWX$03mNL~kBi?4@1C(p_Gs+0;-)1``J|MrMYyWAE;=DYcOJ+!G zP8qesd@$>4i^Z)pl&M2G`8^Y!PBRXe8TkvuK9w~2K@8Z2u31^c=IttnPa!3YdKp7c z8H#*>r<45H;y(0UK}U{q%ZvrYrkh1>lYb4%U=hRGqP{of6WuK6E?!jlb##y zOWE`Y1neDxmQ6dVrYTo6G@rrv=Eu=+DLtb$WDxh8J_&+c-UzN`l#3KIJCoX3&Ff27 zAVW5iiG7J1Y0eRHi3=_hy-}HM!>L~xX|n9!slUh)wc7G)YBq}<@&akUb4eFfzv#_C zKHy3-BXp>`5BA-+=At00>QRsMbrtc*DirgGSv8zj 
z_9|z;99xNX#OK?_V~sSF)<25*Y`aeR`!zglX(TuvC?|+n0r_-*GI>rB>YDXHVj*td z%0Z=CqWM>3EelLGO;%{OBk~T?j7BA1V^otUW;&6hbb)dpWtA&Zww5VUJ`!_1o_=RA z7k-{S8-QL*%#nb99gf6xbnQ_L(mD|fl%uVixu|t0InbXG&lB_3Rl_fj&-r+cV0XwVWYx3^A6P-SsGB zMu%Z$r@_kW*X6jY8&@blGx9IWN6RSV()VQNHpN5lYG>!EX+AYGsYoE`pp8z zbQUu#S$|MFT=`-PEE_i&{qNf`nlU2H3Fx!o(LnyL2*%fhZDOK^?EATLo1Gfsb3csQ zbCB7b8K4SnqnRzd4s=v?(~LCKgP6LI?q4333Z#`FdcNQEJ79BpDCMnIKr>3PYgiz( zQW1SHt;!obJ_W$*Bb@n^1=Q-_vi^5!v&lanD@otNjQsIDJ(J+I@Fpb9SBv&*QO2pk za=MSo!1A8b_VX@9XjA#7``|QWB4jpDx!{Sv?wi?Hm#^Sdb5=uKLG!gvsCV-T7yJ_X zy)EjDD`QU0F5$oAbhfu`B$z*yV9k%w{O|iAYVE91lLXVwUuAmc{*XVlE3L;=n0>gZ zN^>I+p6XHT#i(~x9p9&ne4kU6R*6evKX1%z~EDz{s zT)c19{N`{gKOHD5^MppU23DIe?Z{}}e1W!lAixiN=bXUf>lQe9!YrBrEW#t( zdvZCtu$~-*@v?iJE;u$sTu(9s2YZ`hE++L0y<^cq|FO#{c4TZhzwX z^M^vA$uP`5uMbVyWZ<#lFL2;(K3;h;8%_?GT7 zi003SWkPvuEFPt68WIa^QQI*VhG_Jp7R&6_q&)`mjFLfUt!soWmySZceb4dvt4bbX z7lRY99&Yc{9Y5&|!5e`-h$iuBizW>M4e=Jmv6T8WbHjC@p%;ym^)hszDTK1_v#O}yyZ*$ zy4(nFR$m8?sejPoTOnKL;)&b$IN;fqb>Um@1eQB{BMx`F%Xil8#}+;JhOqHBVc+wE zIW6jENcRm5a?W+PgvPns_~G+Mxz?0A3au#wTeZM1H=gs%z>_fQL{skAI+tr2)yI*K zwbXOhT4RGXZj!ZbIMvJ+@QF%YD-Un}H; z*$>&Y7@3c^n#vbj@5Jil%hJ=6Q*h79Q?PmC1z7DpA1)^71HHa{HXHf-;;7m>q~8Zx zOTg-n%92gpIo0GjhYy|c@zcG^)WVUllsDnpUKLE!C=`l^A7xJYX6hr;jbMLbJpMT8 z1{Pkq*p2*q>W?|tWkxjC4Di6lA4>7`Mjbh>*+d-U_lRG4@(RM+zT;v|?Rw>cXM8g4 zT`?cMp007LfQ!QiLoii2Yw@q*qnW%u_oOLF|gSGG@w1$2+^;F!s9 zV_PalmKe}y60NBwW?+0s0W-DHuxA#xSn~yeSU0vl`*y6Eax>x{>oe*JyKdVvo9YfK zOgixghx3$659m4B(R;A}&M@>_5}-8i@{3)!SqS@28$h?4x)AWlQhkz92hz=&s8!1} zFxtE!+iZD_eRQ6tZ0lSP8jo$pJ3Bn*-;V7C$MDl^T=UHklez;Ny*`d&4_!3&cr|?l zzO)&FjW)P|Y<>bR9_a>kX52xOSL2c14>lw!?A`7B`f~UX{<2ABL8nth zkMX|YJ6Wrq3t+GNYVi5b5}l{_fosD?(`N&%mBt^2Yp8a>)3vLb;OziQPSvo~1uN0s z;t*5p%lYeVMrz>00pfMx$H`p0)?x>J6;24;nTb;A*kU|ku$t3ez^y;c7-<)G3VR}D z_|C*3bLpAQT?1WH7FnWqyFcvv6MNaszf|$KatXv9%wM^jf4#3QubLJK_ZD=+kNey6 zYr}5wl@7X8`UB<)*m%Ji`c~@nY8ZP@I$vPAnK}BqxPKih*90L_>ci? zQdJYoTi6y)ow8uGra(VeHu}W@{T%YTQf}Ka8tyLCN3pM>W_o2l;u+66sjw|F;&X_LSDfccg{`?ug!jxrq~@gSsC`pV6q%74j2H3{0yFn_lby>Uoy8 z@F?VrU5MT2dnt$f`^VpW4`kNP*KtN^90)D;`H#NuB_I^q+w6svpMQ(r!-4BgVAZ1&@Q>F8vkWGQ~!p;^ts85dam`AFmrko$d>oT|pa34~1~K3a{dIiVtAYo~Q}17p6UT!7qx2Zb=0GYQv4TVb@ByA__{nCb}nup zUklNf@3DiRr!xUo=1K5j#ue#&Ebx&HJ~NN0{+gy8+vA+0T5v=C$T9=zc_Yd*Hnj^q z?=CE!N6lISZ3ei2?yo0YPq!of^8Bb!|4BpZ*+>tUcT`6hn*;d=J9c0rJ^MToYX(|N z*Rnh~>90!X$6CbC<#QVzVR!d^NB%vJwA&FktsBIsZsl8l_Tt-Wr<`$vefS3W%5vJK z;N-lCFrK~}kaQ4MI&5OJ$Jks)L)X?ch3mHG6lMEC5ME5Rg{HPfvgr57=yC9Na%*+! 
zl{Wky-Tz6w3tkhm*=6@Eyi=TkW@kQ=$LhM-^>Tw3S?xhGc0uwh@R(MrA)k?6eyXqP z&OFBGeOX)c>uk=^04Qy{mEY-h6t*aK@P227M(FO8_QP>prYs5Xn({NE)(>d?IMoLT zKPNvHIu2uACc^Fpk?i~GDOl#DqXs=cM}MwR_L)AD$P?6v?cPZ5&4>$F->~z*21Ijz zH@Y5A%8`Utw>f#73yEbNC@z89S4K^OTm;VdD3{-o(UsZQ?hl?Yo)Vays zhHitNFP7t_K3l>oAMB00t)_EYB z+Ti*x40Tdd~sC~VcU67&`v;)|_%Nu*UwJAV|$G_I!>`u61m@_sU( z^K&3#Y8>PreoFVfo~Q5re#J(tHdLt=zX&rBUfd^#>VYXXxlr-?b!3hS7IxAMrZp{MyySBUCrJXn=o~EKF)IafOC=@ zWzySPUKygR&&B&`(MRr`Ll2e5e*%{KnoA~CUg=+iufw4CYR zyomH3SiE5#_Gx_ph^ZO%C_DP+9uxJMu{Mi&xAW)3nke|=*>HV$W-%HoFRq|9?g*48 zpuOh<4A~mNuLf?%qkmJWHk(oAc*Ti_u%{g@!Tm8g`+~Lorx(8AXn84DX5qc4UM1e+n0Vm{K>9|2pdev6gAD{HqWpJ9Q@E{fS<#55%=5`eh2hkd|Y!qpe#eW^A#TT&X;-zTVcZiWAWJ=6R~$JqGmL4 zZ~%6T`NOFOAuh0l(SCBFKcfCmTSm}1>j}@tQ%~=aUc1VY@mayhLht0(Pj+(3B2rAB ztHeyVa6sN#-m1|t;ypun^5im5pDQz5cS@_a|6{AJ)P;^Xki4?B{4V|6%Z>VaDHq-`zh7q_l2cO4wB6^v zVXGo<^6+*lWM$1$f(K0(*@#^YG=@1zX*i$mQ)xJ`y@q^>_a37sm*)1yGs6e)%gg?Q zhxZS$gTr5Q@-B&(2PoT+UJYi%?TmUJobq?VnGyRq~*FyPTG1+ z><2bc861~`rJ|GTYuB524TtI%Da01c<7!jN&}HB@v>{r~aKWEhp|w8Ibzdu5duv(n zk zv6IISvPK)uN?u>x)!2-gKsgO5@4AW{BA6>^ZB4dd2q!Naby=8}>_^Y+Bc>4?gdb*j z2tOju{=&&CXx|wZ$4m^3G`C$wWO^JOd z7$1e^JRR_sv(+7VA3jDMy`H{HWBWl)o?V+;#4Lg8NhXiteh1uf(B5=L=bB3_47Y#x z2hx80`D(Dp0MZ;cSEXR>9!_h=3!jB!V2>?+-8EZy$Z`z2`j7d)<4_@zohZzTY2D&a6bz1G1>o|5o>d>!-cgazj({ znX$?XwSwjy;p2=_E;KlKWTd*GeB%H`X92mi-%^ni*tH2%DSJZtoH%?2 z&n_7Xq!Z9k!yX07cP5N{hJ{XaHL zs^(O$O3<7BqNbo-8xNWjeGKcBi0bJekztW`_Qrv?=-J~|SCkhm(kL_N3FKW-@a~g7 z(}@cX<6X%Qh+p7$Q4xHrlK=(x>D?29Yl5f2*y}mHeVYlS6M?)R>qhtO_DmkA_2MF5 z`KbF-{d_lE6`r`~S0gW)U-sl+ds%4ceUXKd?wdTJH~E9^ zJw@h(JL6|DyW|Y)*CC!!PhbjMgWFpcvk&^2u(6Ads;2Zr^Cz{TW|)SNMnYHo(QzeX!QnA}xjymSMw9<)o!{Gs>6 z%3XQHzOjt7HY0up%90pmZAiGN(AWz;!u>mQDWBPm2Az`0t4Q&Q_qDPNgh>QAR3smDKV4WUap>EsLRl-%8{cJl6&7Vl)RLLxbMIw~=@cX)YLL zt@J+E36Wz74JhTCIjDp+Am*O3AV$2z=~`t`ljL7PWLe~a$^STEZ1EpP+@aND#9<)L zop@HuUF1PTCaKx&HrLD2HW^pLd`|ohq^Ggd<^V-p*Wrfdpyzs?v=Kl{{{pQa^AFj? 
zs16uqypqUI#qrl~r2RJgu7Fa54lr+h7>JxzWQxSog71Mah<{sVAlDg~rI2Uil+SAW z?9PnwSlB-s+yA;mwR)2<>bp%;lp2_Is<)L_kfc#hmw|XCOs>o z{-jo_$cZ&Btx;rNQ@l-8;RB{G>P7m~nR1JKkrM&+J>|&uDDpM(r;KV8X4PGU0p9zx zSsI<6I%xL*PMHsw7_IVJKYX>QTfS}SeC6AZz4$oJ4dxChM0v&oKJQTKwPZ{15Hy+l7$3FF-ez(5#9-xl8JoKFge=o(IEyL-&{6Kg5 zUx@6{PPU&l0(QT%RYy*wy|}mE!KKo040X)Gm#><^m9B|=kkv{2xp^JB>`rAVb5il~ zjAgu2(sW$=wG5E0#5uCFBRn6X1PmfkEKQ(;C;EB+G^V-sIyoP%WtJ&+qABP$NH-2 zyJ5^PF$Hz3j__fLbD(N(9CX+=3ytl}(fOdAL}R3eL^Y6BU+aX&ELWqI({yZHk0*4c_mq z;@#-oFPhtLk@2Wg+`laD&2R=vHGUR~H@^=TNMN4PX!5dpT6giBT;B zTR4sv9;yv>lP3Yq9qhN;i&NS=m(bYau)=K64d~CV>F>clk0O=0$tsK+GnNNAW$@g> zgHqck!*SHZLQXY^MrXcI-6UYu)p_{!Oi!;#tfji_=rD+mJfr;8(T5>hK7r#m1If9k zA1?To#2eL($C?aJZty1`BX0f2diV`x&OI;U;wcu=^>g%mSNdYqEAJ~GS=<-`pBk$7 zuiA2{Q7Q~>vH;-2L*8_g1O|71GShcASmUTz7;)?hTk<51zQYe4hU|p5>-F)pw<7h_ z9(u9rb%lOLer6U-+sf3D&>JZDIOrn9lvs~APp-N8zkcj|kEj$@!!7j}i&J$2>X z@6xf|s6c2n(;cZNxX;PvIBc4+Of`fr&c2t#e|H#sot5=d(PLaVO#Ea4v;Omd2ao%4 zz1r=Wce#|2N#ca5ikHCS*fb-(fV;bDrpcnKyNW?=A&mw3Kp3Y%+8&(2LVhAoFFwkp!LdUb3n#*&9rw1ea|7oeiWBW&NJncBHZ zPg-s75A(B^u_h0O@X(p4oG3A;J>`4Cs{#q$HK>IrH`#I2X9Pqmm0h$~9(!pBZawA@PTGJ8?{X8}C}O8qeFydDxhV}R~PwxU5()-*H*&n+5&nWouTJoH_rhnC4n)Q0p-gE-6b#nRX>?BHTOxe;-z`EMHHhqmcxubwY_d$`MBH zC*I40zQKBGqtUrYH41CTB>?pm772Q1 ziyq38svl5r+6>Gr*TI?d(-k^Dd^qwPIw{-0`coJ^KdB2JOH)x`WBJjEKo1_m{8MfM z51C1R37^{SCV%EmYv3N7k7ezQp0Ll0(hjY(T-R{%J@k%{hyuTV|n!-Xqo9yVz?J|Lw`jFI3t$tfff&tw@WMMa(H zI1SYKp~;M}l^q}51E#rT!<_Bq%IJ3!ku(Dje7c6#m+J)7Q#c`^Clh+X+U_~pIn;p( zL+`Uv$&B}URU22{?*jJ%w@CZ*N3uF6w!*6g*SJ}i4?NGwRja?2ht8G;-w2W%uLmd> zM$vuO&R}7xuXMUaZI$MXu=O&Fo!gC% zw4*)RN0|zZ#2);tkunlwiH5v~!EUtJ&!mG3=0s5ia?40?vOd=a()e^P*-Ed|0^3YhU>;*=Ad-J|}ldG!JT< z@%xGibAT|F^v!0{igOgwEb6%__RJJNa-7;5>AJiu#|%QDlWrZMY-M46X#Lz(K3J(j z-m!;#{eeI@KDx~7f;5>$_k9T2bkCD|R~d9>4-nq6Csj*8XqG1b48c>sicy^~Q8$%8 zi4Ve6jQM*CgLGfh_uS;!Rsr}UX9EoWVU9EwT=2UK?HN$;up6w(y3FsrzQ~q8r`(x* zd^7J1Agtp7trjD3t(VY>?`rGu8?8-b(tU8KRt}I(V01k+Hw!^vFGxj>0V`Ji>s{F(X&$8%CktArjRyZiGz;8sEzjFw(y1K>RC? 
zE6;QJ&3vC7;z9!pZPeazj=*kcc?+i`*IV#y!SA?ek(rPMkjnEZ1!yyw&n%x7h9chsBqY>{O zcnhf4IBVTUa5WSjiXl8+$1+WusdC&#_`Qci*Q$HmWMT{GMBe^ZQ$yCdtv&XfV#vDE{pHtlltEg_`=s;HPN2e7hN|{bMM1nyDoTpYb`im@*Mx#9V36H;1e|o zW1Vw>w4*}!B{Y#lJ`{hHC@5 zLq_j)Z0V@=!~w48Qt_L#+z(#;=N8gDOQe-?SyTuX&7yUvH=o7%r}B}`U+^;NutGFi z5e#NIz)7n!>N!PdJ)u=*)wPGq_j_@{J3ZtaR?l)GP#*!|0b%xdBr(2s=u{7YW`#<^s4Zq{B0lM z+cDZJpVwc0Q0vDv59pqg$yMk%XDzK4aGQ$^fb(SX12+kjX zG}jp2cMK!!LDAP%2_D6CO%h=&Oq<6g;%g}W-d`%P+$}I2i*)=Uxls)q0wLZvB0GJ zXHH&G-T$(TlTPCT|BmMB;{~Y-X`F$48XsC7D0B~4U%sftx`eG)n84-P%`QnAc_~PI?KchsY~l;K_|v0_6cL?~XMm z%%Zt|Dt%6QMD;igc1(JU0lux}Cbod36~_s)>&x{0D%Akb4xWw!ue3QXasl&oMwGjx zAoT|?nNJlReIS3cNhx9NWTYeG9J3?R23W)e= zL+=h^vqsDK1o6%U$vs%3|J<`W%5i ztsLM*v8ssc8#~ZlXj&N4nD%dYznT#bDMCMHJ(>Z(6V~A19~+Ry&5JSt5aT9zCGXf6 zq-z244iM1lJG;5d2&R4q%`2aGu*p44;2k3k$b<$SQ2eWyv@9cEEOZ8*>-kcvKS-~5 z{Z25%q3_nQpglo1I-~-Hm2A<;8kjJ%#R(*U1uIQ{cID<2VTX1>vVi@8N*E z1zOLt#Q3woUkv=iw7CL#3-LEnw!qhRy{D|~8?D9jfelXs`C>*`iBxA=ueG{s1d;|9 zI@U{QZt}^bN%k}H+3eJ}rV42|)uNUmES?e%X$AXX&h3uy+%E(OqhuNv@(fMEWd3lL zu8x)oqv5}cWu!IRVM$L*O@4n1m9SM^e9%QDZ3n_1PzJ|F+BK5TrG+!fFBtV7r_73e z-U=HorZtP|WNGf^dEm*TCNgme6WUei**8|vqzAdkGZcxt@iL`%jiL^ClG{4De$BZUieuLy~x$#RE%H)F7-US(q@;AZ}+B>NEdWE=6 zvd=xCkQY){hRgu}%-;B+`6Sv0vL6b~^|90jN#~IlaAov8xglmj6785up zYMt<3a0FlAP|fGI|4X^zamv(WiF7iYc<~v}TkA;I29#<@r|^Na-WK_6PP~DHo3QBo zJ)xyIWp-G<+c_lu2g)44a?fkwlaVwK`(Aq_t|_|@J@sg7ON$%K$-sl2x!$c9`YsZh zfsabwtgYb}na0>9Zwq;hzLa5HuIG}l7uR|VdK|j2$N^|5Hm)cAvuPD5hL^_i+ zS0yg+-mmppRM)D|@Mc}+GSV3k_GB{9*h5;}QeGJ5sE*i^$ta(Y7xxV2BG)|JO9RiA zx^$tO1Ws-W1lJa2aL+6p5C3k$3BPdFa1YR3-vQmH+!5LoNJHT&^X*8!PFriLf0D=u zD2por!X19kxwB0Ds)k$ps+6Ij-S7L_?0`Hb>6_2|nb}?CYFaSc60wMfIywQmj-kxO zMm_vHld_eKFa({k))H4~Kyj$hR*ZNNg+9w!U%}D}TqWXk;c0;Mh{#B2Z0idjDfRXA zN6Nr(diD;s;@cao-d^1EE*my2mOl&pq7j+S5!YIr2Z7LphfM}?%Q06eAMTDc zuPDdWQ3?By)BUSS{ey%TkYB#a2Krrza$3rKKX|=!e*uJ>04y~bGKVoHGL2S8`-nUk7C0_&kNRAV?Zo$B&K7U`$fDrNKvd2Fh?DA6ZCxd)OrkjPt-o}5U1`U8X}8Z+%b6!}c6 zQ%i~O&mv`3jI=ht?f;1{85NJObu0M1gTB~;CxGqkzT9Mp1H?Vj!P--vV#bHrd{1dE zZSj0UNs5d`)5K%S#c_>s-obLoe(DQ|`4xtSt6TGB?ZUCa;|Of>YXWY0_!xA94nvpt z|5%W{AGR{DCwsQCMW3v?a(ulf_#V-0(t0a&?ek0$Aq4y2#|&bzI>h-PtjX^s2V>WHWD^vqNf7E&AypZ?WT zPUloWtZ6(ny_o>HEj~+I9*%}yfh+U&;pcAj&UceQw&u`f9)0JZhpW#*c7N=vnhl?tcphhe5AywR&+FuL~+*f&B)KK=M54({TI zy%xnl*rp_GykZb|n=srrA`H9tsD`4**>LAo3*M|{J?v}Xj6J_Jfayg~V0E45;AnXX zsHWK5;2DtMw#Uo##(JQ!1e4@sws><(upD;@TlufYlp#&9cL8a&S(~AHN+cwF+yF*+ zibplB#8sibFudmqZ1=v1k8fBPFZgx>x+a$O*gzh?EgZLcWY5!{pUX~7If9K^UxH)0t8vma38!~j!08&9 z`7}eQXJ&=}HD9YxO>r7)rhokf*o>)%cXtk^^%s}R3uZ21(Iek;?|})hxKSGPT2x!k z7+|c*N9aj{S<5l6%WBZ1-(oaA46rU(Oll1~BBsNN?Bx&}o5@XV^ks2PV|~h)moo>d zc{mYdfZmC<)q>r*((&*k%)j+E*y`DYCl^nbTpzSTsvBH3yOG>BWEXFBU@2Sn!5C`J z)4u5okMd2+FN5*Vhm(ZnVaBBw^PaW!+GMQA!${f=OF_S3x`s+k=Tuh5{qK0WWeo%+oW z%g>+2>`Zf(K50MRd-7JI>j64`@v&w){H={A5LR&N7xv%yY#eg*0le&;&0_oRf%$WX zvVVHlBsw45)U`9TO3=d@@rQuwl#OUg&wIM(!Tqs+8T~u1v6&7w4i4&@ZTc*tDwVnI zuw*N{$D`GDIvmQ{lJl(5KQ2`HGVnH<&1KSjH$TT{N zE9uGK?S(`5)Noxn_KX)i_VWad%`2(Sk6rL+qZyQy|K*1`?QwSfh4j*REf8;^+Or`% zTsRGP&+H9vaG5k}P5@LLZlQj7Z^(Pz{DRcyoazI{k0d|k=%(h>pUm6MI;X{sxFE_h z>b9o3m;-CyPsd#yVnrW<$Mqse8Is9F?JpT+p^mB5iq@g_<(Ia50@aL?a`hlK+|Rjt z`VBbr$A~@J&{i(|TES_)#k|9(;|`c&P5WVvDp3gA`OUOuFy@?v_S#c!Y@pC1f)P&0 zHV*WT#C$i*)yss)iF@G3vwE;6${d=e*n;lpMEL6)iMc&e72m+U5I5Rdruu^rk2DZ{ z=C&h&(e?3!x9!+@i!H45dNF?bm#h#@sE1@7w!h*Q*GnA$o3nbe#(zuj*_N?{<;Q@= z1IMl&&F45=;Vr6Lp>OC4I&TNnX}T#COg;#IYq!O2eTE7AWFMao5crG_miA&f^{bGM z#}fB>;k}6?>F0mhg4%9yU$v88?t9B$xZMN71w9fK`8^s%QN8y%nm$BY}HOhjSKjKs%J?bhHbLOIhkEy*v550*RJ>m#|RvtwO<3p_@n#qF@ggiy#IKZlX;x2 
zh=_)zpwYOKmrxb-Sm$>R6}i(HI*COcUElE2Em8-YuN7v9f2?g>DWN?r_%gGs{_?Sldvo8jxf7}wmfm4 zhJPJo$6w5$jM+246>2#9;dKhCrOk^Dl(5*;90JsO2_BoTTN)(7euS zqnW1|X(mS63rUMG`>LgsT|JXXkD#xer3^Le_C>Lg(R~cae~!OKY|^U&}~ma8ZvtH+?4!p#yzmHt}R> z1TGy~DG_hu&&(5Gqfk9jxk4Fan-RZ(R+C5;Q@v8AuEA#@}CZX%UpNTJ(+2EwV)Ra zVtc)A$deSR6}CRSN<$n;&!gmkD?MB0^7SHqOR`ey_ZmQROnZ3R6!GTcuX55rG^Z2M z@YzB*;=ciehFTF(OP+Q1J$E>*rxJ&W`NM;wzeD<79q3iKLtrDW|Mwke%-NS8cRAIY z+VEWmb&t1(_%D#rXRPh#O%UrB4K$aicdI8-4RXRlSKc4k1%f}A- zjyScOiAo$J_56|sN{9>FAJ7|4UcSf52F%8=EBlc&FI#xtS}q;bpO0SCKo#6h+Jq4n z;jK-*lsby3Dzp-D93y=PSz~ON;1tqosJkT(Yrh->(>w#XcVZ0CoS<{lyQrJ71}nRl zB55jYdH4q3I%+;|_ufN`m7*T*J~r(R9itPVvg8`iSm&gQpY@CI;HPHPLBZofQyRQ=mx=Qr z-|rPC&gaHI$H0ly#`wf=5;HvEsMQ!WCvf=VJgU9gGU-36-*?K{CA62r4O9Nqz5>ZB zNHLmN!6kgxyH4uU(Wfy>XF0n?&sQ$e3*edkT=}8ueeB)UiBi)BhCJk36HGt97)krd z1E+d$(gLI{9Kd8r2;b;D2Za_V?FiM5ZGo@|C%xW7zQ~2wE%*vEtadQ+JLuqj3<(SP z(2jGJ8uv#)o&l(4@xwHLlw=#}YQbsJl9P})o6Fw<@WHRcNY}`SXYqQ3r#jJM1rm=4 z?q&l2m%kW@O=TtG>K!_ExNCbQu1N{>JiB zwlKmFg)kdQOL9-Mh46mhH)WUmeyl7ROrJLt-vi+nFT;oYL?n#>q${+VC~{&;SbM>S z*5+RinK5Uut3^6tg|+313yeoG)->*X-lvl=#<(_!o}+8$)j8W?)PX*5W=tq}?W?VPI!^0( z4bPW+oFaIOV;var3EOb1H&XAz>iW89SNTR0c;+>Nvs1KP(iXRy@U~`^HFcdbL+xEAhc^nd>BmqXAOm&d#mJO z(ZH<*d*ySQ`ac(G{&<}q&Twac93Ma24Q^i<%So$|PHxFa`+1QE0H`q`zZMNAt%hjp zjpmHY9_O^4m1-Ggjs%e5TX;Y@8c)6AOAF^Q2kQ5*mD2H)FhyyEU)h0YnKb31SDXh=* zx$Nnqj#$3dU(`H?++Gf1PDtDH(Y>2;(k&?Tmvhr#&5viv`1ENa7k^r>IR}I-?B|$% zVlJRpCrcck-dq)#z}=~y@Y7+iz*lurz6DU`0b;yK14|;02tM5WIE_18{$dBD-Rb*B zGQtdQg!F9AIV+(tP-HBWaY(}4}X-a}g6{I%3K zrY;WK;3g;OHj>FdGN1p}F~W6i{&L`HDg2pxlL@^ivXlIlsb1N^UP9X;=`ule%>{qWMLQc*LE^cjv`lP2;6DPZRn zEXXsUe|JW32D0Vbw7$sHq91tW?nTlp@SqFbM=E{=LJw1Z!6~PZh)+rPM=F-vUjX5? z)N3C7+lpb(dD&9Rj8c&F11G-3vyuytHUYyESy1@ZANI0Qd`i|5{-~k_?p&awihPFn z27+%bftz-9Ks~co+hY@kTtSC0cdRwzgur&t%`lV+*CnSsJ$bK6dJ5c-v@!b;^HZ~} z(@Bw)VAkU(8Ydej>SphX>qweVrJRcr_+?(jcl?>vvU*<1p$wqoBFzyHZlUmmExfWc zl${9vMZ-u#q@Do(*7mS4sw?TX&UkEWL)E48DM+#2gQf$!A$ep*wTk3VN$)=7Nhh3; zFo3cqOMcGd0=x9EEfQZb(jhWwH2Q1=&D&Cb^R}J3Dzuz3f*827r!C%R`xLJ!9fW6+ zzIYB+sjnDes8)-LJV*GWZOR6`LGJ;Qz7m=q$LjjgnC7Btr~~^ps*L6YHai^^xd&#( z^cG{x4?cQ^|Dw7no-cPOg#U`j9m!iEc{_!CrkWA2qqaS?4a_cG#ngc`cKgk+pUyeL z-5`~;A$fsUKwRSW!{{Yutjj^-Zbtqb2hQ9CwrN)x{hW&|gvN>RF-+(fF8YwTS!wj@ zh8C~MTd-qK*7BU&7x@tGFwD;biO)nYaN;28NP+u78C$-rD!!GoZTCLwD>K=K1Da7_|=_PWWGs`nXhN}6$R%gmW1M&}< zoU}LPJ;_MNkV)q=k9q|-b>&VJx?tOv{&4BPI3WHLdV=W>3)Id#WildPWG(*GM)>I_ zbR7~@2;ZX+rwQy67!Q<70cAM~&A`3vJ|`_rn#doAR_p}xwJTwF>w#GH z)j_RymP5APNo+oT5YRQMf;*B{?-$+>cJ#XJ`PWxp>t#iTKpBSM9qoD5><@u8pL;{> z84Zq!lz?(!!O!yc!t2aCj{I&~QTmKir})*`{y^g*7oJ;I4=Wa3mN{10)e$dgduN2`nW;Bjjc zomVU)KI7-UT?5L9dD|Nwlo^*>ss;z@;n|uv7P3U-ov@)byWgLK3z7!))`B9Ee;^XSj0FU2^j`zwZv>m#p~!O6=j z%Qn6T(xuGx!Ct;}=uqO%U)n6S!DBs@c$oZhoK_>mWyG?@+iR(w!|c?GHyd!9=?t{| z`UKC;-3;!b2{>=&S8jZM3m)>HhgNk;*`CDS^0=(&P~LnL2JQ@l2j0n;SGydkXjluL6kExwkC^bAdp+e<6G!t4Bl1xH z?q*aDf5*Su=src1kJ>Bl1m>QqhZYw7Skd}i7}0Dc&$>}p9{)KN^CwKhk+0lY)o~p* z=ZY;%-tbxbJElP~*dylvw41m?qGQl|yA4&k2AtK+4G(T>fw%P@V)>zgp!;bpUXJ|4 zSJGaLUBkv{=4`uwV{ddRuGgSFKIs*OA>*R(yTcE*@=y|#q|u(qxk2*8hzQVe8wddw zySR1VH4xZf1HbsPJ_bE6;sq_TF>#W+6kmA~49_%E=C^g{pN>Dlr}f-_grD?$GWd;aAX9Rh+}Pdfye> zpclxO9OQ3CRkD>UPw_iR{V=d*7k^Tm2-fa9_`wKo^o2;xoBVJZi}_e!##wZZscddK zh+i3R4Xk+?%xZCl9ke_QZbieO?vNiaXLKBl+PVzH=k~oq)NQN2bE`Icn1A6K_O8ue z2-s>%=TTQ)-y{s%tz09Ww58|An}kV)nHg;8Gj=)aE9dEvs6%@CWA3{;P-bEAVn$;GZ)>}N^I97@qVsCd zi+$@gvC{yYtn-gW415ktPsie}r_1sCoaQB+_xSUwb%hwW)EUx~X7iGjJCwVDk$Cz0 zO7L#o5f=}yQEEmzqwVv%%G+ffsUFN#r|LYO-ef#}q~}PS>ziQCP8EYM4pdBoYO7*y z{v5uBl~eVQYF^YeOn;UDCI>oV)w&MwEA74M0G z#qm9$)`L27aeNc`^7av!<7>-IQv6_Oz50r{mV_M(0Uaap&p 
z3U{dNcm(?-c!KDSBI%bHe+*?7>eiqpyhsY=rbafmSsoy`1t@|(KQCA&M}qMaozdk z>KJsiY{=IqnQ3TD;gZWx8v9&4u9?ENty&BPn+v3_VUL+(t8nNv-HOjRSc(ff?1r|Q zOOopq;E}U>v9Hma*ln+!uuh8`Y7v24%wK?YD8fFY(?a8U%)n5mL zF~RMnI1e75Vj!({X@I$1vT%FLe$)punde*`eD4&4M}Bi$ZQc%j-wfmZyi95C55v$k zjyPgN9dGK;Bjh#%jBCtmC7)(Cc zr?kg@Uh5fMGqb<;6qJn(_E-W96aymCe7o7Ok%7$&aT#Wvwrr;sf3f(0o68p769lQ!!=> zw68x?YG3~i)iJ=tZ**TE`4$^pJ05o3)0ca7Yy>4H$v{2A-HL9Y`0l|oPD1GDcaUD| z7iboG!WqvYP}Ac$Soox2=C!SCN1b48Pns^T;(PUnpkcx&C9-Z)t|nW8U$bqRL!~u1 zeIDV>vcn+euVkVZueVP14By^JdF7DG`|NF^s-b(J*9qE>Fk)-*5RZjYD2wNWAp*aN zBQhb&U4#0&ec|M;519O8BY4a*g5{36*naH~h_*?`eodOo#f|E!G{1Og+6ex6TLhYU zXF`%iI9#Fi%L$Wk-6A(EjHdOAYyQLFL}Qui8q;fZS;aAL7+hyN)zf&KUi$;md85nl zZgQXURp9=vk-BhwIcB+g;V<2_Z1(3L^5z2<;em5L^_L!W8rTX5r|~SkYbxffbjl&2 z5pewRzCfJCztX;#oxVrn-dAt97}xZt4e`qRXtZg#7l(L!rMbD}<-bY;?uPoZ#bpzj zum`5so(p>t&#>C|0T3~83cI=^fQgzUKI9|L59jS}?g!Ey7;R$<`;E(Z%@Q+OlXIQm zc!4F%PG3Ue7-?nmOrcvCVY;$=Lj^Z|KaY)AkOxB7EVT88_aBZa)Sv3Hv_607});}@!e#w$+d_3@1Jw; zs~hl2pEe3zJMnU3(o@BPk6Gd^g`Xa*;OJ#rpq9rmB#p%1ZrTD>A>|Tbi00v{!SH^< zDTR86SDjf(&p&P@TyLx{?%WtcdON_JR$XyN)N>|PMKP*hflm_g7*1~*h;&?7Q?Q(B zwviMRRK_oq7vb61FeFX};%cl}634|H6z#IW+`N3{k&l@w_(gEv$H~Z!v^4>7Jda1o zS}hp3Q%AMR=%kX~!11@+(mn1j=(ueI2)uB8okP6s2~@LkX2t-#*qu|K-RFXno1Yp* z>&Oy!T})zwGZN8b-x;v!l*ns}F0h9Aari1|6sJ1EMSpKG?=iYqW@SL{hrUDmgS0;E zyhuiK#n+8I!{3g*2-JI+b9+5T#<4-aie8wKCjaH8k&Or5FOKn|xcq-uf+6f64NsBjT)H~Sn z0`23H?+m2duxQ>1{(a6)t}J{({Xfd3wcz_*-Ur@;y(5FiycV4#aFwe0riNM8$)_;$3A5-Kz%H9BCWSj zML@Ovqxitsf8{{+WIocye*HKP{z9!TJ$$95#%t}Rzw-3?FO>oekg51M~g zaf0MgW6qM}R`T=nH^Qh*x@x0kU8SM*Qk0>;+}Om@Zg|uFGziXg zY^AHx-;8jN?)#kv>D>yUQJ7B*;sv7L(0WIT*Yh2*-Fy<{JY)?Zzo(;NasSmojc)Qr~xByz_xl9!}li# z!69|K!n(HR-7BLttuOV|@?r9W*5G}t1(LU+weOB_(p$XGi1{EmZ^VMuYJzf)5sxBi z6%_jDlkc#upLT|V(4mCK^16||;oY?n zK)}=s*s$qle(Iz@)GEspJ{qZ>CF+0nz}XF#dZtRG-9TLX?IG(K^#T+Ab?VTakpJNx zh-)}ItdQTd?*)nd&jR^5+|_9>zPdOK3N^LlHr)onqpgSFW?2*Y_L($(eeN&5spD}3 z5RS{Ed@9KkJO-(JJ3Hk62_xMMMSo(w_USPCnm4UWW6EFX90SrdXt?Mbi=L}$&i?0& z{^>iAxC?D`e6+t$bHbMyoZ~|862^H|ovG%T*BZ&>Gl8@UBVH48fuy&QupD+ZJSlXT z$O*uZo&gd6Pd>dkZp?8uu>VXZ_-1~#g6Sa^FmL8U_&07luGy`xlExO;!AJ**KI5c~ zknjsrXQwjVN%hssk@N6MWn-jF0!c$?&(V2pDyM9K{nPk>_n2@m;cEp?h}xxdAU^Df z?fPkWRZUmH^Kx`-bDr0<5XD&S9krC!Fi6ts)q4k8;`P|^AatRFaUS7cZARxzo^>&h zZcxk9rb(|7o5=xN)A7jKwpx8l`tkVAkhw7Afj{)Gr4oL}18G5pw5q^TBrF%a4C&`P zg8e}Qo@Ak`-i2)tlIeqPt^Q-=f$;n9G_cV*2~B+;Ga3iF=U1dkchjq5Q#%v=nQ3;UaVVR)`a)Z53Jwy1TWZ8gk`h zC$tfI7>ATiMd6K3mIOlkwu80$j<|&PPihB*nK(GZ3db79@KqlJAm{i_Mm~WNM<26Z z(@T6G2Cm!8N#o&6Cx0Le1nMQ!|Ei;sx8a1_l+|3u(1M2AbKF{Ziu+!D&leA0iR@4j zCygK#Y<5Te0lPW%nwS39Fr*xS-Am~%KZ!cYmo`2i@**HUQs|u6=V_g&-@h}$BP2}Y zgmp;xgLltc;;vGAsd~LWlPl=ie#nKXlFTiq&f$c`>|hUF_`Y^M5|%=GV6M=9;5e;B zdHw1v5bqFHM{tpcB>$+R^@sMwe?VkQb7u>CtR~!JyyK8*7UC!{nKp@U#CVxNlU!NkemcTHl2{ zxJ2^ z4+xhO$9v@^(vC=a5r}_~d_Oz8v6`0+Jjp)$51^cIGfxVq_uYzy zDVq=YapDfH8g9dD{hidB(Yu5OR!NHp569Qc?+mujgBf9;CjNaZ%3NkC;nN2QPpR3v z_z_S270a!9S(48+MA9UrMVj;X#!ZCwqU_F2o!_tu^(+f~dYp?~?`w4?2w!?U^B*T2 z%Scnvx}B%_!EfaXWpnVZcplOmsq<*f-Me6~KIKMoxyd)rl_s8&Z5Jbjyd4u7iF6?+ zUql?2$3B`h5dADNL5MgxpHt4IQqQ5#`Ng{XIr%+-wXF1xN*TsK>Hj$GNgTu<6gGub zH(w#;6+%zZ9NMbH7bwO?xinh%D4@Aid}d9Ttm|onUq!+Rpv;j`K0_L$scK)mf)UmR_;*Gfb#AUqPh^8UtNJ-^K8JBWY!tmAU z=wC~gANj=5d-`rr(b^Af9UOstE829jLg%4m0eb8R?pbcyVMiL`QwqdCmb&d?7h} zIH}dHS$%g%i~bH5J{9sOycBo<<5!!&mgm;Ecwn@0uxJC2Ck3g>0OnZ_q`dWoLV1r& zyv}Yfn!#f2jo?M6X!g`SMO$y=`-OKD+MPVba9;h4)&wC<`{<}GfBNf;;Cm2Rk>TT5 zQU5~kgQ4DZ5F9|veUhflG)dZ+7Tk>@PZ7CH z8v|Oe@$LucFom){$_dG*aN=e!@&HIZip7mK2`xoyIVhBGJrmxDi5@;#auY>gl7?du zo&SL!J?ANK``RrVnKCQN!-7@PK?-p&_@%T&@^VO86rV#Fe%JhzT9;%Wn^2RAQ-Y?j 
z6V^YKEAQsARkH_U-F>FeN&hN7YJU!T@gC5}ESTrKJk1M(@3E^^{cyq4GcfS0IhqIi z^5|jhQCZsncYaxgyH{JtSGPs-1%9-@;hhCMWbSZTxibRWSNni%$VRj)9n6QNCgA?5 z=~zExs$^<>6vs_*Lfui$+J7HBk}~q~1M&6X&+vVkC%)QXgBdyqUG!V14~P1($8^8{ zv~><%nq!ZLTr*&u%%C#L6LLqHsAGCu5WmOsdnWUOVt1@wFb@`NpN>oGxTE-Y{mc$1 z!CBie#k{%H^Wj+>qB+ht9HO=DyM%+do}>FdG5S5(H82&1(;B0VN?q9eBw8Q#wmF=+ zxCNt@9!1AnbI@xjDt8Mr;rmrP?zL+rF8A(+?-%W0Rby%I_~OQj#g3ND&{0Rt%in>u z9rsCXqxQhi`)O?ciP_+KVI2yaP`7W?4&;kgR>lP z;<0!PSWo*hrnSYqzfY9mZtikW{fQXyq?AQ&)!?D$C%E{VKH~$YUFZAOZNp5b6s6uD z+FQwb8GCP50=?6{A>e8(6ciW0w3GIX=9+)X>7a%kf9ciy*EZ-o*o-ru0nYE?5(Z$z%2+VC2in~^O@l`huIdS~s84t-}c@yk@-9k24cZ_e)n}zlM_{noy{bFqFH#&B_GV5a` z^bPw8E26h!(d6mWAG>(N41cEHiI8-^l)<@)7IOTfsr=Rrf5=_;xi~cX9H)OT*^lUr z?iUV9R$aD9QA<06!IgTD{Q3eOTk8WGA8o|cojy$dJ`ECPX5(HPJvDl%iTa%Of_<@} zJNhRaE$Mw{O@kB&cL`af~WN*KFo@=j=mF$_l$7}DID9TKP$`(SY ze)l{V5!oxs-ZOh|-tXxTpZE1szkBaF&+{G6ea<=U62$t@Z{qIM%Q7(8il3WF+Z?t~ zw2?rs3ucRIMGNb5DovJYSdTKU{T#0C62*BgWBwUPA;>UrEV zwc<(RDDT?$wC{FpnZKjS%|}+_%tkd$HJ%MNv>>f;qP49ynn&*G%=9Xcd8)|+VB1wO zYs_qp?OZ7*R@xR^Bnv#djrJb|_OJJ4+FE_9Tr*`c z)xEx20PDEby|RQ@k&zmggm3f*@*-mp+NhY z(q(2_o>i6*XVVblHTCi3N>bymCatOIt=ByC)EHRhpLKfOe7Zx`6{5-4V)`eoqEz#p z7`>bvCa$CHvBkv3FAIS6tvGVTCpoXuS^nNKKLz}kM5*~o==Q5Hf)AfOggdC); zCsVn{b1(jI%0clK?`|=Y235JnY91~X{cJ3^eXSLGwwRV(yU8k!kBoPxdEr$UV=X5h zdm|ek7$E+|R3o2msfycZ{NcJXWYn}A>?kq*xdHx_I-O#LO=0|)D^EWu!3|u#Nhive zaz^}4yCcgysiMDHe<^3&Q#U7YJ)HCKXnO;ko%~j=XmdtXS@f5tc2APWQj>xICA5p} z)(d=>bl)9B;A89PJr%i*?@byp%L%b#%gqifW6Uf6=rl%D9GgtVet#3s2xoV@eB`{h z{S5F#qY3bIz!{z&ZbOwHRb=psvG9n=4K}24;n|*a?M-PDT7YUS@+O7PgC^9Hd0WIA zbAE)<3#`%G=|DVN{8N&T&Fn=OTfOA(80~_cpk|(z=}nCj0{4x+d=K(y z;zf_PCxg3wQ0uMV#k~hXeDzHW*}=J(7(XscB1S0h?F&Nbh)Ktq(Z{|%v?}l77>{id2fBdT=)=aT8fj7qS&GsBALfAd18uwkQ=0BUO)>$RPBkv0p zBTxJ1p%L|yRsPYi`hl3w=?ZtmvTHp}%s)L|P>1h5$jevj#EPz+`trHNZ4$hSnA)y& zPdl%r|IBK*c-k^TobXV4FPX373oCGhpffb!f_#zdj^TT%GrgXCPR%*nuRST>tly@_ z6=RbofHyR%crXJqWZjC_Noksuwk4qn2GX1#1^Lsw+_uCsshFg+ow7`Qs6B~QjZk1FJuc$8C93lic`pTF@5bi+n&^D=UKDq>Y_Zc0ZS%ZO7`th0Z^62J%dZ8104g5SkJJ3>lF;Zvf zK%QLpF4s6~XKGzn($@;TDY9P}MYT#3z*RHKW|+!3`dRH|4sN>4JgY&cQsAvsyy@;E zG2@(<>FD;Gn=L=cmRGmvQQ^{3@l}oQZv^rW&m1b(WpBY)dGeK_XB4J#jYj9FNoi+M zb>L6gb)CWER`nLhog6*B>24tH*f)j5_AU|}1nrR_%DuA^^pOZU)6QHl?SXK-sA-31 z=jUTSSYK(q$XVAf=I9aVMp<)bE^~{GqqJKV%CBHJ&AqHnbYYomyU^@#WTWg`erT|2GVbl+8RlQLRSN6z48^_KyZw z-{wTBpSr@Bp$( z9g`oOE=mVGR?xLt4GpE)zl0D&+jFh!jR<^z>O(k0+ahR5L~ zCvUx1LxSdVMNG93Q8v#l)fQzaypYfsV)?a&G-Osbzq+?qX**J%A$EA>l-?6#u}V zP{C&hMBZV+&_%8$VoK>4nwt=B1QZAWzSm%-RiT$f>&Zn0w6-`gGmpT$P=!I2&3|b@ z;Lx=ae;D{dz*2o{br1NJrfTlV<%tbVcg#)MtqYT9yMC;+enhFeGVOdz2@L0#>wk)_ z&u2nw={OsAD$Ndzq?3aNgP*3d$I79c7+r`fBxVb6BfGnv=TS>8K)bbN=m!4m9E)?@ z)-iIFQ+ssd?A9yj=f8Qz{i)jtxYVe$-*O5*6HQMu&M3?lq1URD+xdFRKY-`(j(hYW z@$RL;n4g?D2WDGGTnM1LVIgd;_A)Peui;XD4RIgO=)^l;dbPe|jxHEdr82|MSgY@> z3T#=V{3I#eSa$PPdP;Fz=1H7DtV)c#jO>T~^O5JFUhhRC^j$*ZNqAXOyr#GUzJPtV z`Z0WryjrJ%t~}+d8e=&q0Q>tj+k+*ITUok=NX#oa#?{yFnYLW#P#CncS58b!sN7N1 z`5r+{GA!Df?6x{Ird0mv{K{x?)X$Y{Ea%1K$1XIc=}AJqn_oj#3SdD_JUiVyErChg z;d2T@qwDYqgt*f$AI+x^>0m3tZTys~P;>P8Cdaj6zrGvz@qz^SfV{xM7HpR?iIrbg z-n#0}?$AfUe09+vXgTzz*7{^9ZKO2Ym2MSv_(jZ7rs6o7;@DFn&y^P7*@2}6y;?-T zD1rurH(5ZX{R(lt2kq&_NHayng7Q^Mca+V+N_cSEVuSP0dqwlUzYZq2Nv@7u%GM#p z$vI!|9DYZ>3ivK)oi_|!PA}KbAmx30PhDZ9yQ)N$1O6{ouO1wB3_Y%pw zhP{<(^(yhTos`2*7*BIcBl&{|NAxooF)viEgKIV581?A=N`eo}M5hd6cHFjT17(kx(-`ZK^!IrI&a*FEKqnrV-S!8a&dQ+W*K*5{G7SX$0wZQ+|MIQ) zLbq9{sYEDkL&rA<)Ah4!`O+f?a=O_DYY?{*IFP5ly$1ep6Ro=>%g{ETrQ7+#3Nyr& zv#(hB9^^1VQ}DtgI%+UCWS@qPX5z&!T;DuxbkIubuJxMH{5-7){$6@{-x9}sO7r-` ziG1nkAsRd(ua5f$X6~SxF0HxUsU7ON!ncM!f!A2+_$t#kXo1NVUR>lFEB|zF!)~Fx 
zK9$B4rp7QV%U|8$Cw5%Z5QJ_9%Qf(#7zh9N>zw}UXCZGdy=Hl17E%dHNDVosUpNj_HRk5g_3ewfs7%y8}#PNDv z_%UU2J>UKC4Rr~8Vj@mN#Qa9)sQ3GMRZagKOkK8O7W6|3h3MgQ+O-x9-?E-(H(ra{ zkShV#SZQj7cWyy9;5&W^;HR$qL)M|T;NcKXA7n93XZdT_U9#w`YhLPbr7ne(N#^7a zTdmL^)-HFB%aCuibzp+*(eOFuErjLogQR%5c+xvFY*Ymkb0FTld}`Uz2OfQ2IO-Z% z5*{XpgVTau(2ehv+$+g_y3A@eD|;|1kU7_}8fz7jkaVf+MYSORmVG_OPrgJLFC zVdRlexToq5KPuRn;5%s@vYI!PTEfNM;3w}pQv1)3CF)UT?4)_f)hVp-Lyhg=HOsV> zUK>zDd}nEBx*EWAS+QLmPU~UAQyg!?pGGOX5~w4Y|6=xWXqiwAx?eyyn5y=aQM8<{ z;sMu1ZR0urozs;EO*qIi0$VUNxn6w8e$3ZFzLPtNhxe4^zB@1avJWc%L#zC|bHaz| zq%?lFnEymVbP5&BUx5043?_JP9y}q4vRtY{yXMh7vQ99(h@omo5&bmYGwg(cT8gRS z-pjj}w*PiMcm*G&C$$Ny4&-P(75|74gGxE!^gUGmwX zp3{Wkw`9Es=V`*@33BL`=A>$a@Ph(1U8(ffyo5z^b3`w8bSkCdTUY)cH6`U~#LqiB zg{p=2+grt`as8!)_rmAH)x4r+=tHU=1N~vt+&Q1N#hP-=k_!AhV>5Va5l=XrUvaZc zeG<+-M=Q{+2YopDZm7sRaWra&1+}v)+d&h5Rho(R>?@D@VM|uE$nz`96L^%L^p9uw zNy501w|5@#ZeCBBG~}g(Kfzva)pKeKML#qGR)oW+RiG7KE!hTV0>NAHCXZvv=Zlxi zu?JOB3x@WkZ|lB@J|zn2E#(P9Er#;k{%+az$y?Ql$nNhI@O&6qHbesd(A(sCo_uQr z3t*Bs`*S|~_8reJN3J5}r{M*2Jlg%!LkS*Gz{d!slc2dJ^rgykE9O_A1}A@38zF)h zHaA=5ilP4HPl><|Jyh*po*eXqdSB{As?NEJmlNuAs$RzMUDCfobk2R_o=uy%yM@A5 zxM$)T-C<0P@ASR)=X^eq|rFLDpxKjKgKgZ!eIjd?eAf>H70Q*vsralnvB z&i(S85x)~X6Sod=^~kR@FmVZgSp6S|{0Zgl*QcXyI8lCB{mCM(FXOz6C$rH$S{_|g zg(?qx#X&D?>JBy;T8&}TDIw|zO{rp$xG=`t;RR@v&{EyNl!XFGL^doHD%|Hsp87boz(JjXCw1XZhg6# zD(y1YimKV`c*X|s??Ep;FV;K$cIr>IE@Onf`%K>0v@Z8e>&2()7UD_c?ZlWTU4+ft z=k$7pgRC*^hbFT+akFLLHK*|}Ibm86y6I%_)Cj?IT5l5BM+0eY*e~Pwpxipffo_Ku z)gsp16uJCH@~yn1MZt^Lc@MmG^B%v%leODL^rp9R>1lg@^RS>^b>UimSJKg(v?eiS zRYNx|zHmN$)uEs4|K>Q?aq22NmbHles0%Gy=b$}EbD^O{n-Ts_YkWEjr}hR_Z{Aj{ z92rGDd}oVEogWE@=G$r7u5gj?F3YAPNYGxGPOT9L2GfFmkO+)K`5SR-w z^;RJhu|(b8`=b8gMq4h8qFGo+T6|6ewoCqK6n~mRzFIHx^I1Y)np~6tOWV+iT6y(i z&h?GPjf?Xoy~5FzBg<-P4B8z!AcyQerj9v|-mpSp~7J}71p0#t9o}+8?;N%r@%rOrd zTgi)X|JtGny?FK}U#`7$E4f|RPN~*-G3ccO+t`Il8@qxg@-QbS>mBGRjt^QWzPs%Z zquN}MXYFv#_#a4TM#n8R4g-7qZH0Wgrcepu}tDgIh@%p8PvJh##)nOl?MY7o}gd zQdmf@MkI+8=dx0JF_;6Eo|fZ%stu<6mDI?pXpUT+g$nSfFH z#pKxncqCKH*P~O4QhMPT2iS(_Vh)FMttz`rtSYx%ZO#eC=*rbDK{J z7oOFMqY9sU)6JRE`0-{J`giM#)ba<_T*{!T3keu1n@=rg`j)I>mb&#pVWLD#k;;*j z@84hclrFk%QwA;;~l&w6+4h9ab5BHas#SA zr;qG-DwOsc{Uzo@{#sg{TkfsE<9+s0+jj9Z%qXEd4C_jb2cd_*`*;H!M7t);wKn?Q zM1Q<%uILeZNmNfMt=sf&$Zj97iA~Zlz82YsBVX9Ct;-v(HaT4ahv?ba%DUnl{0{A# zn2QcyX+~F|Yue9CFnnr^#U5l$_(VWGYJb9xN5ywwyPqv+L8+YrcuWev^Z8Gsp|4jd z9+C<_3e>(VYgNGBH|IBq!IPL%tu-iWS3PRVUwGf^K8(*9<0KUpw4YaprfgrzpF2c} zK$oRCTv_zU0s~mer%Gf~^SbT1aPC|3@V?^a$=1*1y|3#Vj`G|rRScaQy`Ba<%R`7y zemmOK7T>r{F26T2;)X^P_vI=#Q{|sW4>@YrVU8`)lJ^_;S@G7IF&>D+tJDx@P&fUT z&X%-#=FAo)>D6BQMsm#@+FZV%oE(^Jq`QYz)$ntw>Mn1 zNptM;xP!X>b!MlqP{k+wpvoq)OP(l(`a6M_zH;jC`6Bq|VphBZ&QRPUv?>jF+Fug5 z9)0;UlX_foXFrF*6ynaT<+B&fF0|#)b47Sd9E;404`kXud$DX?h%utuMp@&HE91F2 z_3k|?_b(Ql9ZY{V9aO@zjsU6r=AJ;U#D*2J+Qcf|CiW5k_nW2wm`XX-xl zr@Ur!PnJvz7bP2w=3<}na{p2b$iL=VRyyLyt?mSj)n5#aXFONK8zYAzFY;@_1DEiI zf;B}}1$(ONdxP7J>qieewikDwHmA9kOFU?Yg=gGdBQZY&ERu@Tpbv1~q&IiUmaI4_ ze&=EdzN8HW4{-Ck=XrUzCc28Vms9iTSN?S4fC{a3=pFQIy_GkeG&w0NNW6I%D&v0o zF!U57@3i8v&j`ARj=kEedD_)7yPR3gh!O4gc^l->LR@nK1D~kA^KEE_0y?yTd9_Jp z9`J0Dc`iJifG>uctL`&rP;>zLCFkZg6I&J2lrDoVQXHGZmp}Gos`;Y!`ar4lqsqPe zjizc=aSoWAW#Hd{MTbSHA5LnF37TAfOTjww%f%!xMZ_id;C(e}@Jz2y?9;)KEqxQg ze^Me}dDpkCqCn&oKGtPF+oJzKX@dR(22fn`jhu0FX)!~zXz9*LGuJ7ck>DCJFfpIL zw8k=7;_Gq&3>8O`yb1GcZoX?_@6yXDEF_JDJ3S~k16X%Q zeXrstbKa~NEqf#SrQ7<_;N-%l!X1UPQALvFU#~=2;EsoY*3!G2DX#p00Zk)@2bVH{ zm*Crx6hF9*@vA~L3Lcn`F#hc19fG}oJ5ub2wE`HGgF(>6(h{9lyE?dsghu21=Q?ol z*rBX&`hJ-<8Zes>|G>5HGG)wZ`D*zPVRPInmll1_C#FB4L7fV*-M~{yuOJqaWtlFg 
z#kug-^5VEH+-}8uI`OuS;s)S*CvC=*)ug^#{RwXNL@J%8JjIgLizs5e5AHWX6uQ5M zPLEhA^B0aGw`Kk!Xh%Or%$avn*FfX^lAjN|n#*U5<8ywKNonNnxwc^6!O~ijS|=5@ zNTnwbrzU)Zf%u_A!!+`&JV3<;d1WNfsukzt`{cYt} z+%T*bL*D|^LkTftUEU2|XXj$7Gc$mB{x$(Dw9HPOM(|F2=aVCKUQ@&bcf*UtDV-ys z{Y6Cn-NL8CN_n<&E310mJ9C;*_QpCqt9~05&#cCB_JfTA_rjIzQ)pV#FbS@cy|3<) z-E+AqKP2(J7U(3Q^vqN2$5*ASlZaXrq{awd@S_H;LhTOKWbZ=`;ImBx3>0%M9hHAG z+(*QTa~Y*M{qbmS(K1G939)zJUWPuP(Hi!}oU=(H=1ph@ZuiBO=HxjiOARQgG-e~@ zy*Bl~f<%k53GpCKtj|YTQIDzjsGT|3-EQ1HO?fC_h*oB88R)m`+JhHuwHLeRGqAv{ z_o)i@pZ5~WXS}0U4g7O*_hM#wQoI)4Go6*^diQM%cy=JA+lEusooV#3bScyR&pE<8 zll{%IR8c!D2KWUr@(dgsEHE#^yL2IX;c-{#D_Z)xf$S05kwb%Zt)}Azsu#M4?FaiI zkK^GH@5 z#pRx=y9m5SrC*&=dR_l^$DeolU1qQNMY30(9elm{0eIC^Zrm7Wm+a1^LyOY9zJE#S zu_9OcD6hldANurs1GlXB7am+^rQiL#rm32N#Iv#Dvnq#+nb0$GUf($?<}8)xtsuS2 z7*ZM{i?8EXnos;_f$Ky(OH%@)f z#M#QHsN93OtmZ4mqd5QRUP~zbGw?4WX|oONX?IXSmmHZJHj|Xc0^docG5XU%1DGjPU8c#~iQ3uYEuax6 z5cH-WqU116Em4%b}o?xeTSll@GXmps~cCoOnQu zf~PetA`k5e<)~Nr==hS_sCSgWzr7dGu-tK>fqKLV?conM&Q%%xDMNi3H4K_6v+3$i zAALi_1LYx6_jw6Ett*uNQ_qPQQt?5`e=DtbATrs~r|(UL%Y>LD=s>(PO%RjT)D`-K zqUL}3^Bfu7GnP?LBJIT!0yhiw-InMe*~hX+029rmKA$M5VGE1i$$fC}j} zW5aRiT354B)D6_7%9zko&_g}sp4Gcin>#C43@gErfqe+EEj#TT!JFy~CD(vUBJ5*J zrSV0B_V+Eo1v>IyH?FX}m+}cYS_O3|3Ec_|d&5dQ0vCV-^F`RjlIEOI?Zu&CMh?fl zKlxsS3|;|EpmE2tR`y=~LL6T4UILRWs2NGr4Z)4av0s)|%esGtSzZ`6VCxhm*&GAfw&JH(G2V_;}d1FU%^_FPmv&>a1+M$ksufxY%ALEUNX2u zmhaJx?|PTiXKy-#Tv!2o=xeHc`n2sdYMk{A#4@M-3r^X3biMYu#t;79Jb=MPwDx<3 zhFDcv0{l@)c$Exd6@SPJy~dkO?6xd}6j!JkA+(aVuK#rkS`aU`FW)E|h8>j1dH$B- zsn2QcN(y^XFBK|3T*{xbK;NK-wM9akaEHUm655wPj7w|Syr`=lyT7>v|Det~PdpFk zrD_UFa|rM(JW+a1J#A`)6cx34Ihw|GeGGp%v`$vd#+lUTI+&_1cjj{{A)YK9R^klv zuv+@`I;pB=nSblm9+Py8xXNJBz0zDW7-?&%RmBjMMRkXd-dFbew616*tT7XdX zBh*$j#a zN1_L5<2* z@7UkLM9qSr{|S0Lhu4+HR2ZYYHpYzV9@!~-cy)k(-OHQrS|vP*!esW&)Qo@im%;-s zL`^q3JDO?aFitE6Au-p-!-fp^GtRNj%~e_Pj3fSTvp8 zk*AI9!9~253HyciM(Gu)yl2`nes=k%h<@Lbx}3Gs8yM56e4)m)L>nze=I$)-TJC7a z+nk|uqrQvp=uOIwJ07ozmRmU^IX zb^Qzacb3fiY*e3g3hx*CQ1RMrxV(Ql7drQfJ*%(Ae&cxmacw{EO{ifMKXOBa)eoi6 zYCU=6)F$TdqMHe0C~+?`cFS0L;O0QF$0yRNHHRqy?+(keKkewA#1p6$_3f5QxL*_Z zOO5cZZSC`4JnNqeujxHX8#}ou^O51=jVrjJj!8xC=c2Q1Cdswq=5xIl4}@){Fb?jp zfC@IgO{r6AO1rmd>NDC=_mGC?(cd&#Mu&DpQD~>WG@@1tjm&*TfT(b4Mba+^c zhK{u5@91ZFnY3G2!DktN9oVz8v$+B9moPUn3u^;bmp{cLPRH@zb~vBnoS%UdTL_2~CE#f=f4MC;8SM>AiXRPiSY2KKiO`reEi1n1+Q zF1fU0*;{Dr`55=pCXJzoI-(p6!g&1p#C|vhD z?qh@O;x>!lqSlBw;Zpaj%D9ImseHB}B48b0F%J>;idExWz zRB%i)*3i3tD}NcD>Ur5HJM9-W%RfL==D+ert8x7K=n}#hQ(UDjqF!tdxo*{BIqqIr zb8EN_kIUswxf9$(`<3AwIJ!1fntxMN`f{41>gJ>UcsC3zp?)i$iDG9Y?G29;m9D0V zV%V?XxK~S4%~77fy4ZtQunf2(3Ojz*F#jq)wLQ}cAXaZ$w>$2az#jVhD2Y5?6d=bk zJNfkdMi$Hu$s%3xd41lqv9JZ>i@h`~9FKR~z$`t_;uLWyz7sv0nMaSRyF`oM^pZ97 z#)TEPH^-0M$Lje{;r)0o^zsI*lo+eEyftAF51d-UI_+M%F$%rtoyHFn3)9@$en&y} z85S&mVehL?PIKg^xCvByVo9okK2S@vH?=CZS{}DeC70LPV%5^C0vI6r|C~qPQGEzUPuz2h0^a|Xt7`D{&`NpFRmOQ%9_$I*x1cZ~O&6GXqUfnwV7KH|>hOF0;Q z-Ez=SxR&7_E~XtAK=nJnp+SrM9p%Ts0Y)zI z(SBiK)PwVK>8)b)ciSDDM{7#m23^CMuO-dJ z|4#90tiuAv)4sAy+308go0UOZ4%g+8zBk1H95h^Z<6KehYH8roU8B&sQQXjJy*BF5 z5L~keb;i1llIbCQ(q<(Chs3A255|#F`3QMSty6ajaELsM^)iWLt^&UnQ26mEnzQwS zK#ptAdf3pS0(q<`H+Kw0qqh9cp;Z;oyNI}PZ#TpCqP5g zpb1z9QS{no+KPUVn^)(8Yb4#Ay_8b!I|`M|XGlZ(9#}pz;ja z_~s_wISy-~7M~`Y3-2YkO+Jj<$SwZVYap&c_c9%yco{ZXT(?;G9{BSfs>UDsN+@H^t1kU}t{at_j>{t%BAkXL$daK)`H?hg@7;fX2 z0#4sX#|mxZo6*ZP$_cg1L$5ul z)A;99ndeY0?AiVXJyUOf=F$;6gxn$AL&Iy!%FFMJ~T%#_;_oNEEs zi!a`5WPj|D*T}Dso*14*qv7(y6Z>WJRG@|=;wp-=Zs;m%lJqHto_(5(`fCND^!Bs8XFUsNrMgMGu_pRo+D_A9;yNk22{A)^YRYfk>(H}u$EHSlVaG{w`#CRtRM@TKFEZrnJjv`na7&GgP5 z!r*NcYt$lTQcmt&=)99cF2ze|1Mubs%_G*6|5qk(Xvf 
zxst3jG_;B5_Gxtee*pDf1+(V=1kB(yzUqFm~D_Q6em4C!D@`i2fejTgAa-N5tHeK-zGjk zcLlf3SJOOLCX|X7Nt3JhW(sr69|>+_#I0QazB+ZwYAz9XJm~rn@%3j_D!FY5K_`i8 z`=VvD2CW%-ox$gFQ+Efg=|xRc!5ToslcDCK^ud$7prIrDz*r8$ddbK6Pilx6!)ISn zF?@R^oK4e^DmGYyop5e4^m7T&f+`}H@qZ`&IPA`+vG_F2$kd^iX z7CeCcBQb*6C&TeE64D00$K~a z9%-COKZW)DxyaI>tl55SLC*LaLJ>+9C}{Ny5r1~`$9wCND)d0 z4Mfi<=2QR+EnCLzg%_I1L09^7=}|gw!n#}FxuNn#dCNcDDzeIv(Jm7?F3(1q*(o2t z81HT#X#ZCZd=Nu#2d$96b}F+Yun|0`@~W1gEB9$z;}Yt72|NL9JCwGpI>3lK?rYbL z5fe&3F|?Uc`kSAz{q1hXvzb}OJ$Z%)ap$CQ@cfTR`LA-KBCDUp?`XhdF{A$%dDiQ_ z;%cfssTIW?DQ2#T`Gm23Xef;Pe*UCXIdHn4CcO)d%(<5GhKM6*j6hO&qVUwURel1l z;JzR6w|ZB_@q+=hr0;lY^4r~9GH()NoM^`OKlHn3xn$^Hnmv3M^jQE+?b3h{GuCV; zf4Ui6NH5Z61#o3cFjO90LXV@$(fK25$|p~=AZ0lTqpQgf{r$ow;p`P1IC?c9}fFI4E{y#rb3NL5)05#TPPe2e?dvTat0=~ijEE4`j z3lsDKx2hMxhjx_J5nDNaPx(=3NadSz@GV#U+3|kzEP(mg?frnP{=_xt9n&HB0?2LAOb8nnWBiygCJV$15F5durQC_zzC;Dnkkn zi*LN9>OB^viPT)ExB@Q9PFPcnpXHcYBRSW@Amizw6K&wor7x;=nby^Y z8rwY^SMcb8KN~`8Na!N*F?bB_0qb5yxJq>|s2OqW@sC1{*R%r<=s@M4tawvB!fa;{m5-xb?xEtt@jySbj%ywe9t!U0R#P7AEjmSxT3$J1V~@tXLzl;A61d`M4FA@~W5P zYZBXbAlphs%&%Fyxc0<#0<|;Q({~{i-`JJPEWF3Z+xQC9J92bxs~#IkX=&8Z^rfkdk9(4+pp@Im9Dm+L$9|nw5)`GGc#j#x)J|NgwJxysSVVsx`N=(2%a7q zZlFZY=I9R8jR?F)S5p>>Sry8gsKIgXMY{+*%E$xd+YQtz$WgB%1|8BUJNUTpbSPpR z5g`Qst!hiUL)%8yV}(4;kIkNH-)G)x0L^6pQ$$FKhP-CZJLv6Wien|7S66xXYv58b zf3J-RyfRSVFv0g+bkr&y+ofQR*Hm>^=$srru-nD7>XxqX2|DkMC^O&|gL5d`DFd~t zm5j0By<;lrC$au(FxH#Ghs%%7 zRNYy??~{qOSu?8TmRUx>9DfXcOM!OQdpC;a+?gKKH_BJ|*I9+SLfafqXXkjPVf0{M zR+!RdXD+_kAeZi0y|@l-sC>DqGth(u9eDcJ7cv~{i7t(3qo485!t-_26i#GfUH^yG zo%EIV+u_?b3b$>w#pb-_X=_3refZ;f0~-jt3Ht*gWu$MCw1*htVl5}16iT+-zZQ|cJ@#Hx5Z#=($IK3(Mi!w3_^7*tFc`(1D z-r2VXc?|7FCD*Oyu)yA;^l$7ry1$gUb_>p1er=`ty-SKyMm+cKyIKy5#JZTO_Wb>O zM?M_!KqOFU%H3`;XW5)&@BNYVdE`9y*j9-~Y>wAv1#ITVUEfjbZ$Eg$^HTb;+4ZRB zw*^vbJ6krs?8vQ~IPt<4&)Bi(TrudvTG@KtBf7Hqhvrl>w{AP#$()UPc+0UK=DvTM zMVGrB)geP1)Jdz%~T_o-!}f@atGH94SsY4hfnU*av!6@J$A zp=^0cL;a()-aTly_H^8M3LaFOHa%=3zxRADKK|GtqxLr;yB+7L>!Wm9P-{MmQ*Mms z(`U84E^474t?(zD6&`+quADf_AwIo~-0kf2YVG4FrgU4)`OHRIdv}K%e*TEMC;oiS znZ~tVM(1iS;5ywl8EzMJQseGab0Onr7=PFB9n_#gBXu49{pV_&+OZ^se`!df{HYYw3A9kQprYL>AUBtScsKb@r6MZY`jRa~=5LIo!+UOd5uM zxg~#M=*QV(GUUv0>|MW?eq7H_zB{&Q<8D_@u^%&uyEgAH-mY+^`TdI$EpJU}hwC%G z&)6UQMJ}#Vi>u^+C?3zSn1NwwG_c$&!{R(y{VdnMHG|~+t=w|uOSy8QD=j=3EiQk! 
zN%?0d(Tpc8_*mm74e}aY0$uWEQSPA<*l9c=jKAKx&3NZb9irNIE_gCLUVdW$n{ei^WZIcRSa=p z^?mZVV}K}|Q7Gr{y8CnaaPl!e+i#`VSqi=5)3BH1!fM9Sn1w=(^Q1#qGft5bF-_H9 z6_-QOPs%92%H-4FJ%2fNkvk0g&FWfB1|Q@NK{%uSGS<^vO_B#j?&5jN-SmI|ZRCDm zks_hb+0LXb4 z!5<;-dr+4pHae{jC-r=Tj<-jjNJSd?`49WvtSbGmM?hlX0;bU}k5F;s8F`Oudz9eN zBR?eOkK5ndF76aarb~Sd(ZgCx>`nD!qunCu-@%(*x{neVU!E0Ej`2)vd517rxtWEAkJ>4}M>NE`NL>bowcwq{x8=;kl8-kyc+erXEmW67hF2kf zBba77`-z)Z>q(5UK)kVU%R&k>DSGi9Bc$F|Yt2d7^gij14D9@$5&f(w-I)@B{2avi z9YS2vzhZg#)`ZP+YW5mA=}>V-9>@oN^9|&+uHxT$cpg1`?I1$z3TM9`+NdH+>G9$P z^mWxyi_`Prgj^RP*FxY$@`-w`Eg3i<*FV}~WOs`~e9xfPP1dNqmZv*h62LUGM41WJ zN;4fz*VAbtp;jmQSoV#GY;!G+{EvUW#4{uaF>V?T;X zJKGv^PC=UWIS6~WbvOD27NqlOIBUIl6{@;;A?;all2u$OJPf(!z^5vPQ?s+FVpiN} zF1Mkc-0*xomt0p|x4jfdeLZCk?zveulk@9`-1SXSt}!=3YLy!3Sq>|-!b`j{CnZo< zIm}~6UZzdW2JkOYM%=wqN!04RjgbrDQ^zj?&qIZMvuIDX{o39NL#Xhcw_*?02cAv5 z&v*_(yzsf8%L3!c3Ab~Jrm+iXmF1*xSxXe{xmp~|zmgg}8Le{CD(8eU@>aOlxoBxq zY_S9u%?x zZL5jmi31Gv+4N#v$*|3qy+UI7-oG(?|H?hl;lMij`YBU2LJLpR&Y;QD3E3frh-k;af{&Vo^=a`6-ulF4b31m66jJ*OW_Ert-jYqlDsQ#I}KF zk#DQz&Ed0^!_dQavJIEsSkN5fv_PcyEJgDcWQwI}zSR0!9pp$6U2$m3npf$ycOhW; zPfLS^*V#Y*sOXmUSBx4Tt>L>3-F=vBQt^S-?`stT=ZG>V+DX5n7{=#ka4vsrt zl1>>lfR903tb8hcT7C)~9L&FKcGg{@7jw|rNWOFaDOVbQnv;qzq>Qg!q-Vo)u6p|l zH;+2RQP%wC;ikpRDed;s;4fWSai?RI0_K&))9I}LY2IqaA?Jt66Wc!%=EVfwoAzz@ z%lAor^qA6TdFHeymL6Wks7*0D760OLcr!8f+;gs*O``LjR>t`rsj~mFbF`sAe;R0J z@KC!5Y8q>+;+hsLxg`3AZ&bevo;#>PL&#n}4#J~(xKuwA+x{wpBPFm={2P8%9C;GO z4JsT_vDjexL8h(CJhXFd=32Xb%+C!{cUbQ3cH^gods3+*T?li?ud)TLcBoIA-8M*n z!`@W6xih)9TrtMethhT`tZK1B1791Cp)r(sfq2cH&2o3TE6+@-N8nxaisvJ0Gu)pZ z4Ef98crlKen#dgj7gOB%b!@ZKgL}C(lUqI%U}!`!@j)yyD64vL!P&{L})BcFN8)EHS?*J~$770FLM%J>TKs))sz z6yOa05PX55i!^W{14HO?mk*M!-9mmk>);G>vy{`JX=(j@dwq>>KAv~1k{;SOP(l~d zT*ujrF_++W1{VqBz3wqIlUiOf7}``Gx#>@F@A}}3qFlPuO02oBJ4v=Hc$fz*`o-5y zpP+5;>zKEjhp~GjXVZVeSi*f1bP5;fScwJL@xY*#Dc~`&?R^4Ie!5X84PMTi zNu~NqzR_qMV;&eh#aRRU(74;XwL*uJbK)L6!GFe9V8km!^HR4GofHmOanE8w#T3Qg zV${}Mz}ChTmMe|6^>H%0+`UM%a27fERV?cCwXUV|f}h1s zBj^Rar(b9KQz(Ri+tjB<9?niqpj+E^NazmQf29cdwp=d87e2(uZ}NE z%o~f@I`*tYOea6NeVH>Yo8*B^dv?1KWISqpQ1q{yXz92hA2d%-MOlO=vfLtJvfWAj+?O=9j;F|zh0Id){HoOuT>$zDf%2pGUO z>s$~o-aGJ#$1O?pI%@QK_+C6oDInkp^yYtj1++TN=;WoaNdqpkZOxy&G!1>MUvo1! 
zgyB#0GojIz7p-ntH|$R5wRbCW&({|7N?;um+KgJoRAK05I(+>gyhknq&QN5Vn$WZt z2-<@c2Sd|Q`uBP4Uo^`=4k5lbLksj|pUfNLSmVKzIL@7+ODSSo2LoKs8DFmoyvb$c z4Ufq@Af_aTBBt-KrM3-qe7sq__Fm4wI^=K-h1t+(YXvkd`#J8XA}(zV=hH?LVuqn%RQ@U*!y^Xt<7sTC+qFE* z|Au!lTiKqFnPJ$|6rbn2*;VPvM*ov^)p1!gTNptwKnx5}FhNC33}ol5-L2T&-Q6Og zB7z8timfPOC-Ux`Ma06w{#3;74#Ysk`waIFf8X^A@9xZ;^VGXLbN|i(a`WuZi#|5c zaZOHpKbXeq>y)-5b$|8&wUll&j|_TbK--y0KZ4VwM~xhY=P}jV4_|HakxjK}$;oS^ zIQ(nhBpzJ@=gT|Pr?cJ5X*T;dvf|>WM_1AE-f!q+sh#|7tQEtP5PqM&*QrYVyR~EV z9i1!FTKO!6F9dF(${D+xX#AZF8TH&9<8Y{)9UR zETt8A7w6Kd^$d-Mxw^$K-$n9`JR9jtl?2i2RWeQ4A0w1chxg+R8CR9|XJgw9oFkEk zPvke~tL(wW7Wd}U0S7p6cmXw@+T=gAEj4hk)Mlc&-!qsgH>-ycsB|Dgy&yPR;{qNaip_&@}xZaX}M6C23*jA zBLcdXf7-YhmxnFae%h?3wVnLs!Cz_oZO{oV|AcR1bMfn9|F_%RzxbS7UD$7c6REX| z88=gDT+6DWOGx%@yWesD6E#B%)j@n04^|mkeUy|c(o9ZCmh`A z2iJ-FDsQY2R4F!&Ctn+^^rz+gPn^@}Re)`p5PZCiSXZK|S$tJ##+vKEex-j|c^&wX zqz-xB)406JBBx1|(ycnQgaDsuy)B2i^Ru!7{j9t_^{p1lDTmBlyZ(12T8aay6ZId*&)D~RBWe4?rGFkB&AI!Us zZfwoViRsk{Izeeyo)M%x#xqgiO-cRgg&6I^it-ZIlZkN^895H;>c*|}wH;+obNMaU zpBZNz4)UxgN~f*HXCCF(*!yAbr$Ri_IhLX6NNIFr4e+;37#>kRntUFAS}UBC-5No2 z_B=zO7n%v58u{eMin`Jj;##*U5^Jao~haH0Nz|hp@`DRt~+N>xZGG#ImcAbZmRP*1Plt`nfid#*OGj?;m~Tk2RJ^ z^bR2lP&%IN0+$%z{zT{_zO>3k;ChsmvjrJIL0W*ky5oXy?fU6<+@K%!6HLEhmic~D z0uv=TOkCR#nu}N9Wuex#$cIGmU0q#iHDD;$HiMwEoAPbD62PN{Z0@?m(6@5Zrn`)K z(lI^*d6ZN-Pu14oRM&>^nj)p>ZOq3wf$W6tHY%A2Y*jufR|CV>$!EdVqE4&(az;=P z*3y|mNADFA{;Lh|Jck=+_7XkHug~?h=ArM#@Z78p&*6PlX4D1u3iB0dJ!J;o|TR)+V@IR_%!VGa+2%RfPHz;6LONTfns)Y5(SY$nVyZ$_kW* z=(xZUnWIx~#&zKO%G}&i_2rE_BB~*RA{v0YN7O*gK1-@hNs3Vi%%56dzb#BHD z2~*jTfbZAfvy=yw&?^Rf7yr5)BH_7_+0=pt8V`L@fvjuKHIPY~QRA=h`$zcR%VUUC z{D4f;qC5(;v?wvmo&tx5P{Q;Wp6uwI%QN600>2CBE@Y4MH1vVQ^^|@lXib4!MBNX# zqrAd?s{S&DP!m$=nU<@@@Q&p>z%Bj^EcZp80ZqAyyS*-D>CnzjpIE?`OXGXtKD|rl z_T16?izT#YE{-an|KnN&L-#5_phy1Bq}U@*3I2@A4|zcF4u>+ELWtY|oRB_l(Xg)0};p%auJlPagfJW%Gkl_Kw>Sb>aK z1eKX2ul=bg8h4*W=liTP(7W`j!M5D&#yRN*a>Aaz4lM@CN8U5JbIB^|XT$^jB-VGS z06)|0N0+2spdH>3iN^W6o0Y!QkjY5&Iw9W^qr)etEP?PdqiMZI7OWfm6R*;@s49uV*G0;onD|7yKHD%Il7SNFJC;quwDBPhC zkJX*&z1taaeC=SpSox;<&!5nNmqyWn5w%E9zDgTQS77)Hp)#x=!w>P|;}1#IB)9;& zJxdn#S}tof9ZG6_RMt}LWIR8J-$Ky+G_!qm4ZT4s+w9h8Br8q;zUooqI}+w!qE=1h zQv&&+$gF-)Wf2;@3Al9`nJw!n%w^ONRXyM$Y9EmXa%4h|DODbw_f|h@`tus4g*C=D$uS`+kPEphTaFllSHO?FOGMq2&yF_D(meMPo z-V5z9kvI5xap>tj{+ zt?jM9J8)2j^lT-YW+4AweA*bFy_~my-p;c<^3(mRcs4mXf<>p2X1kp?c%dfgb%C;G ze*1T1zB?fHrMB0wYZ8Y}uf~r8k0fR->CI;f-?FSus4J7Q%jCd$Iuo&6m zY+-%!oU*2G?ITPB^5dSC%b#92R|vm@aY|=A`}g6plRkXadrPTx2ZY;~({kfZoKF=~ zSE}_pS-?r3vMfSV_lP-S)C=`EYR#E_*UP0@(X=XLfOH;~B>Vo|iG6W><%hz9P-|;R z!*^!hcU*-ze6ifK(SaIU$KX$Cvp^Kid$uIc6%f}x;=?o&Hf|5J8q(C zUPsm3((R`4v~lNa%wGYcdH(y7~GeKo$F7%_jILRy^9+@6_R=NyGkcB zMx@EeatkQ1sS~w)nqjoAUkB@Sje3u=OPrd1MdF&`goSILp4y*Y_G@yGrx)?6rUFk9^y z#{L_|Qh+6mr%j$fC!Y7z&#ql3?6B7}moKL;D>mGCf$tD)Y^(94%I&Z&b=h^x5~F^W zzgU(xMEJx+lAakSa6MV^n1zS_?m#r|nHm?*e>E~O``T|U`s~j7SZi{>v5E0>;$PGh zEx*EByG=G!1*luIpg70^y($XT9WE3yFG%t7E0&asXgh@ z`PE{ZN0b;Gwuz=Bv^vwDk zUAWN}doS&zw_`i-7vSLWpkF~>dmkz#q|LrFXtfS$z;4{5-#$H`L zGid!BPm#a#6^id&joyCAX1hPLfoCzaxm`AuNZJQ19VGkRsX{Ba`5Q-^?1bXSlK1D( z?7Gji$C0s?Rr9Q=R7Md243P~&x6*nmoO7ArW)^U;*6|r4c;I8B^QRenD64|JbEggY z^sFs`&#ds^Wi40v==LWr*}a2Y74kq}J%!iz6uB>G44(Ne&aV510PkO@-lI`IYv}E> za)ddi{6VG!pU}(_i;N4K?g?B^Dy~y&gEiD^v@N9#zuifoKNLsGvqP(zz-RsEq{}k& z#0<7SIZ=S$fo=U&?ed`H4f5<-XB~ZpaSbzUAMNG8qmJ-|4ymf98VyP=qt|PAnSq}i zS+pXreL5BIB8*pa&nwQ25?E_45-{8Vo}msO$$74K*#5AS1{~1p1f`0@tsk*m*H>FT z^*Ud-eZqe>RuoZjO=&gWi7K7H3%Q32F4w|Y-x8idPsi^9?nW7%>-HvNUKl&rT$jKD z)rai*^tucy*pBiHcBYV+slbyFz|nG4<`JbuB(RCW&D;(iQGglxHv(V$qGd=YntJAcaJIkQ@FXnbkim{&*!-|?19zWp<@Q_8cA 
z@E=>rj2<|spOS*PmeTdN4WeCd#p;6vfTPpy~d-!%IFF|iklmVJl*oA0shr$ zff?HCxrnu%w4Jx}_=}{y;b`YreC6sWQH}AgMMXH>rzf_rHyP%woeMNJ!ilrACoS-B zZwDlPAxzEXw>~Bl_tUx@oi*?!$pt$#olU%Tkdc?*m-e0~{ZOAd$)CaW!5q-IZp53O z8NA`{EUfqCG@bLOk0Q4F%g^D#N&uHe7g*rIVL%!Z=OrIk!t=}V@zAZfb4aAZ>VH#u zS%iVp`Xs5ZXAMv;!^N`SD6p=-#Tm(KR}m&_ABNELk4OjYByHA6J}_i1X|Vl_^roCK z=qRK42D^#gFm`Yh*T`*A)IQ-qgnj6O#1}w3OgZDM{_-(lq*d6x(W|iNua4TGmnG%f zJ9cqr4kK+ooHrI!QJ{3BtIhzF3jVdQnN%e^j?8#c<+6Yr=V zBALKpiiUQs7(?VJzL_T^&q>FuJLM(!8hMHN^_-iWl^ zCfHQ5k99hw5xD>(o&90?wk9%ZARr!r^R{|Oe5b};>csOSXX3~ePTZZ=;tC$z@d+RNo4(^XJRWIx zL%!&Rj{4vG^UQj5HXpLWjLzLDCj1b4w>zM^C0&ss=82rqJil{{KknIu2@I#4r!#bJ z0C{svO|+C(cHAsHH@~=3gLHi+ysPfJhcxyw(sM0TfkDEbM$kU?)CWKsKkiw)k$Ceu z5_c%XTVTEOmqePCJj^E~PsqFfHIspzlJYR$IT2B27PQC{=6h_u*NaMg(#A~0G zpHDk+@@DAacZO|uS<3`|ho*i5fwh&v25h%|PnBvH%KCM~sq>EUpxL2tw6aPvI{uNg zOnrXv*M6n?L_L}2S`vMcl=1KR@Ze<95za_s<-}7!+(F*|Hq@WL00aiQJX^`BM?rf^ zJ>tZJKz$Ar_e*?I=P!ldA-&78YPZ7{r%`-R&jq5F0K!)Z+##>YDHb?8Z8IZ|WWvu~ zKUD;+7SVSsQ)a@kZt-}ub4&85I--Y!et}!S4a8c++1j){|<>$fIJL8+-^H) zEgnMi+CwI9A#}fV@>*+5eApWY(mp(22X}_7mtDEY8;c8Zu=u{EN?s{vM)gqU+I|e1 zQeqGPb9}(6raEbcHHu~AFF0LO`N+f3FmXN)_cxcRzoJ|{c8#>a7$99HZ+ktO&k4Q6 zmiZnMIgmsA1vEz>yy>6Azokx1EBMgTD+YC@VJx4rZj~NUuEOC$H_M_pAdkCkwSUAn9va;KJ4k^u3yViqU)tj$!0~s1Ia< zC@YG2CM3uBDkglTM&Rafr#xLno?JIWr?IzazRwffZ zS z`jonA1N}Ux7}^g4eAm!9oW(%A$4HZl9vu`sHPygRPBc$4SF=D*Z5ZX7G@E$@}?#36+ z92>QE0snk`(XWIdv$B9R6SEDduTr0t@-~@u=#>nDgPpuMAJ(X)8o9rb;AJ2^iK6Ei z7xJCGPqI*`Zg@g@1gCzeN<0FDU!1NDzkfDC(jrLShmn5bq^~ut+-@RqHjwXORA20r zM-)&$09gH%Ao`1kW{!prCa1x6U@2BqMT%Y!-(pwEnAs!P)Y6|c%0RUC+zX=isXt0} zc^q6D{Ri>ml)LZZ}%k9@^tP)5hE`p`f$qYDvd%s1ZT#c z7X4z@qTWWKy99r6()pOzsRwtG{Y1YPqC;GADCZgJ8=!uuN*WxK#$OUVP5JFld3yjz z6T;}=Tkw2^j!GN=huax4(f>6MyvWJJ$)j`>AP>%``=kTuM!5g+7HQ>gV%`|_sg=hc z*P`fSM&5nHUN|%IIBwbD#AQCne=8!M7kO>#q(kH1%J#F$xf0U6c z^6NYyaggYlK%Y0WVfQE_;W4mR5$zo_+6B9QH&%mOlBJx;FpcmGHx`6*(r8d!u#Q=7 z@ufUGBYNgc;8?#; z0j`;n0K8rVPAO}|EW=~*1+7=RzA+MxhOfl97h54OVjV0mH^hx=cHzx>X7b$XS2@eS zWa9Z7oz!jJ4q#8}hmU{#o#}^e=N?JV`Qqp%uyIxk`NWm8sFm@IUHv(ju3?6)#+_&M zcLe>D*mzMI#&!IGjoh~I9A^{0G`x^qT2;a|i_e3rato{Gr#plzwY*};a=v!a2DWp! zIp)UgLGJzp&+STveJOL~@25sWd(A0~iKerX@vr7%^m|OneaeTMFJR^Ft8xB}ZQSxu z94xfVhl?p2@WqQEaMb-KznkZa;^J+FHZy!sYvIOU*nkrxJ7^oJd_1NFr$=K-R zcP1?$Q}ZPZ=-%NEuZL}Jjl_a}L11lA4|6x{hV0yGzMK7)rsFXFpLP<@_V2*2r}qQb zRl(>r?F4TdqGGCXM^&v%e6*OX@zcS~hia}&8ib$j{4<&_ZJ zqY)-K^?~z03Lw$%F&Hn%W4ijfoN@_n8~m-i_t9)9>iv1aS3K?sUer$+X1^He^?0<$ zVLY1|3Ra8lq$lZ@;pF^H%AuM{)<1g`7WF;|bf0X*eS{~w9C4Jpo;)h)9-HhsQ=xb= zIZq3R=f1#V479{yH66H@lRnt*8Dr+$g3-qeu? z?3FdvL4GXE4V(4t5OzWr3jun*H&GuQyNd ze~EkHcbiiw(oO3;<9rbN`$ zrikU|x6R?1t{&8mH-!#0A+V^!D@MP=nG+u{7u$N=b;lN%Wz_@B3*PY3b0K{4x2726 z-kY87redVq8|6`j3x_ZI;MCzdSnYX;VdqOl?CG5Ir7(Dq0b1=|jh&OSVMfqj@mWaq z2hP67@m<99H4iE0z>#HGTDj}!3l z+@=_oYOg698_O+910{+zn1{9n)4MjTAKI$hmM-Q8tj7Z33Q`WTcV$08Kh#jAdSr>#-7Vf3Kqk0a2`7;bad*Tkfw=N&%?l6W<|6La2;jiM(;q9{qc-rYAE^8jgUY_FE z#Px_0GVrG4nv$kz+Hem}HjIJ3=F{1ejpkCrRl|w_^m+}Jak2GKcTTaHF0J)tj)leXIt!5Bmr=`}8k7r)fx?@xGfjB9|i?26K z$4hM*svBoTbNQnU)mWgq`}cOd?6-=w{%C-rHY$3IgMUAL*nw9;*imPs$WJ9O+6=|( z2p8eG?f;;r_jxR_(_n`V-Ro+u>8L6Qi;dDOj*bAIQU~?t=p0xXt3w+xhe7y`Q*7z* zdh(^dTUno)Xfe-n!Q5jaf1$A)tPp+++~AL|jnGhD$aHTkcGiAowfhH~X-j*N(6jv_ zuij;z2JyRhWozN>-}6d`G55j3d^C5w^@@#J5dlqh`ay|y6w_u+Fu(N?Ca|b~?k_}w{J#C0`J;nA!d#?0lRM#FSUcz(VS5Yk*L=@eZ#hAU zaO#YY5(9y*1%y#5<)1PmC_sw3Y{>{S&nco#~vYtAhi5z?loyV&(Y8Dq)js z|KA;UWL6LCoUxw0_8cn}4Zp&Ed3w+@N9nwKL}U2Vx)=!?k?;*1Z_NXN&EJD9;J-I! 
z*!}fmXtMqTj-Ggp6URy;{(FYRr% ztlGkD^y!R;%--VlFe-aW&V};fD7dg4HR0DA_JrBvR)QD{)hLKuu(r|#*SmMI$AO;8 zoj4n;8r@aByP-3jZfy!%53GQIGAn!)9K+UxzNpIs!csKp^N3qbX(Dito!k%xUw=;m zao=$rSK~azBj@|x6pG<=&|Ywc|7_nJ`WGz`90z&_V$f9U4)yJiWBvNcnD#@h!&0Gx zL~X^avBd7TCS#swAF%0YsmhP*t7i%$z&(3DP>nE}OPIKC5$yll00#N(-YE{_QPggW!?fry9I=Dx5v_Y9kx8jn=V+_G{m<#Q}N zZ|_I6sC{9ZJz@-DF4QA{cG90;+FF@oxdi zyIQGKBXD}!D_;Fd;V)iX0>T|8G-8OYEgmp*M$!qmUosyzR{WP##avUk;>qNQarobWhBCkPW^ynuFU7Wlpb?2xB0w zd0(7x%MTMr#-QMTrFToxf-~UGm14I13!Q_NqNNIM{GYp}thEQ2w~>~7{8SX~UDrmQ zxp*29_5*1pcKz}++%_nm)_0CqBkVRZ!ZMnxzR>aheo^bnc0(2C?${|&J%eqZ*RU~C zLi2(JXdUo{=JgDcE+)Qw#mi?s#FwX-daQXK?D_bCG_@Be++?HNLvz;5*h6}(HRezA z!w}=m6tjl>;(Q}mer2n`Jmr*KTV6A3J`4_-3YL{GIUn*6?(bLt6lZp=?5RX?Qxa#K zgQ|6Rr5=6_)emXQ;MjLK^?|*LqrH%Jenz3o4m&qeNFUPvH+v*8Pv=TM z;^0Bs7_N)8^<1FRYKeTrA(`)h7&meonBX?bl&jnR*0!dsXH1`?nHFlfZIXb3gX6 z5i_ZOTJQ{#&XIb_Nf1yo3Uk&tV%FtC_ts{+;WfS#NolV(_5OD;cwd zI!+cC@Vf62@?*bXZ|WswYs7e&{DZP>={vfwKv;TcoJ!a)pYA3DX;_^6Et1pqrKHx4 zX&<83An?n4X*_Y4FPfgO5BpXzXtb2hEFHLzg`O}*Cry#yC=l~N{99K~Ve7WBsLL5} zEg_or^Zk-DYDE*?s=p#xEzW?i`!`X4Pe+bFr6X`qC2j=q+VvHid4uYtI{#(T^QWe- z$vU7pWs%kwk$eFV|8ZAkEc3SRj6uVjO7z*hLyfM`Xna?XIH5(L$TX2W80~x4?mZWC zL^wn|5U)@z@UJ;xjOrH1kHEt>5!lsiE*|OK0EI>-jl+8QZBbrbnK?k{`Qp9$;&;%h z<1Spf;wVrJz<#4NNE~SdR}0O!Zo8w>hW87x;p+={=kZr5UY5pXYA)(`^N|dHIelpl8glSigmUhD$;CAj(m2lQ}%R9SWWwozPyVgpuA5nhe@E z)`uZ-cYO9%MPa|mPL@ilr@Y{vq0NE#*yZ%O&ZI*gfwXMXmrm@*2RbKr2dAesZVpSFrlNuQ|^oTgBC@1G=;N9v1xQp z5b+TBymx0h9raYwitPT5H>`K#){O88XMagy7fvM!U(DUCcVqfG8}$1TqqIH!O0m)3 zFYX5^uO(5hp6!;%q^sm^DTYE%@S3&>jP_gN6c6%HCmCUYdenCrjJE2a5>L~6x}xwt zpFCjl}tFE)|gN4pkLesGWKM%7z!S&g-%vN=nE^6pd&HTC? z;qFiBdLA{pyHI$WE)%=)DUJK^{Ar)D<-~UIppCJpBZ+E<(S0kSc{#*EKEi7VF5-*n z*;&$2Txj>Y+Wv8XkxwNpagf%h`Jrj6l{lqC3H4V%)HbYfYOM;MryLUefdLIaaIM8H zvDtt9&c8U@Ms5_64ir!Rui`Z)JW@9QuwWqr#~|s|I;~fE;<4ZnM!r?uY;c7wFW)W+ zoiX)t1C@Fjd{5q0@+sZne&2Y|3d+Xv-gcyahpBEdg9chdbEb5d4CBxD1pTc3jQl1~ zv@^s%_jIMX=T!*(SiwlwK}VhIJkz?2k)GvxFK1G3Cjm(B$oKo2%HcbDK|ef}Lv@Dr z`?xUjTFj1~g)^z&8!v0?sBi3sqrmx!9=CD5Jeg6?14bN)g-upBkT`{W(ghY);(;_) zM*4y2_CGHC4!fB0kCW%-_POEQ!&iYH3-{rKh%RiMk)c{)>>{}>nj_+@k~X6JDwVQo zXq}T%%WnAhfPabRLhDPD4UIH}C7ke!>lZv`hpx5bLQ4?70%=|@IFWiQ#MvEG(oXzr zg&m51R^GaEg3sjne!Uct)AjolB55^7_kyJ5aOS|%)W?_(KOJ_`kFnf2<2U3!t^(tL zO5U}a^Q|N66YdTH!M&oN@$$w6cra3B%pj6ezAA&`{V_Y&NB%il!?u6RKKILM4HDNw za;c}%zp_T%6PqNo31M41nDt|m=qCwmsN;Rnw-DZBNZ=YEo>i%rLONg{opTZ=bei1O zZ6_ZUZpY79`t#Q9vg&eU$r?QrdTn;cn-XakPTCtuui%fke7Mu)Zw(jumuBE8N2eX< z)Du(BG_K{O(b3y&w7^_0;y`ncgth3$W}(pG#DgemS!g4|Ss)$Gzr7fbFUw!xhs)nM z;isf`z({>^*;^s+3@1~Ik@{Kki8y+0JLn$kcHT%`Adf`q7r?89hHBT6-Q-`_V~?x} zK=HzYdhQbvReKArmMb z!Yh*Z-y(7ihI)Oa{RQW9Q9pKWotV%Rf?r50g2j7F;fIlOo48~>D_nDvukB`}()D=D ziHVRu=mQ^D(np@Gc`P)mq8Zpq_$|>J6nuk%cZDXS{)8ub=eHc!fPIy0M z#*h|DN}7Vo@NA83yP=eiZ<)xC&pn=syi-N&^`31&;zvfY&N-KM4N1$3m~fXn5$EK!t;^GAx}lQwf^nk$S!0pAao(^Ym-faxbeJ!{ z(pXNKR>DaWN#su?@@!1xtLXa*-uPnts7@zR@062PCr{)HMtg2(dbwT#@oyR@qdp=d zt|I;BRp$?=FN}TP_XdHleR}<)9?~%pGkPtZJ^b%AqxaY8JAp|PB94$p-3en1_SRuM z=}hRZWJ0um2vVOBuU~$o5KcfqhtJCS8;7XQHWCL`vpmmCMtZKU2NzZI5UJmTgX8~D zFCrRwy(V?N5YZEEljS56p0kh9OE7C=pvWOceP~$tZLcEw*Kps@6Q!Yx?eUgLFkbYWq?&e}gm1r_~c0Dt-+%i+4Yr%o>1F@6Ua^S-XcxkSJ7QGhpPpgYz?!({kNY4g*E1Ii6 z{XSyH_O@s+%o@|LT*H{y;V|0!9vbVqazmERCiMCa4Yyyz6`hOO{fZPm^H@)|^6Uwm zTG2$l9@qOQDe80XPhxD!ZaDJF% zW*W_3y}OUymjC2BZb9JiJV$w=`;({GP2?k=9LB+`Ph$1>G1%`%8lU#S2nT+?0fww6 z|5!lheYU^D7raAQWjGU9`<^PZss$0-16gly+5_5`*f|VeIm+q{H(T87b)4;ke{iq5 z9lopXgoTxx@tE#M<{WSZR~SWtU+^g$7;lQRq$ce8>_d{>lLNfKxfhh!J!KDUV&Q+| z?ZIMMb7&u`L;LW3!lF*;bXXbh9a4A5f0^Af z(ZRARnbT*oGsFF{kKYJR`ON|o^=W+TA>zS4aJB8O#OY}8Qq3vUZW@d0U;IR`#1Q__ 
zsH6H-_aDDIexz*oelB<{R?t2}hV}`=;B|fnd2LlHE^+UHPkVNQ2KKwzroso3*%e#Z z%}2?f<~N1&!HzJzCJrz%2Txk2vQuuI*)d((SKalSL(|AsaN4;W{Cm6`9YRByO6T+T zEvn@;(|5uwDT2OGF5nb*waNGc5SI3q85z==Et62FnQn|-Dzw#o`OW1-gFybLvYG0; z{~fdUaD%6suMx&82&FruoxT6H@fsa7TDfMK}PG8r9N4nOfkL7lvnU{2qu zy-~WLPp|bzF@sV4rof|dt>MsqI+K{zaR!C%X1n2`in(Nt&|l; zP36$~@EuNS$?>ND3D95G1k5@-;j1SX@#NCPZ=zgrT zO!v>Iu5e679?)yp_~+-LS$Gu_H7@XyY7%a;sQ>wh4IO zFqYPAet2*S#<|itm=(70+arLPQaf_yv2^LBa~5ms9Emn|Og8hoM3_|0!Yf)~SG~>L zXW4$xz#>dH&481|gFwua(ef1{_XR%66AKIBM#f{P*Y!5=_|r;#C2Jom~7*aLGhPpWZ(kDgW?ztP4CV zE`Yb+_o7dG4SzRm5~dn1fJ;uMQ0rsG4IhNCgqWRt(CcEsH_)+gF#Rna&j;rMVHEM~ zQ~2vh=VH61Gs+ou?aF9e7I}<2Km0DRRS}>2JMkbk82;%0xjGYcGBR;?l_f4|>4;SO zm@&H@{;+gVD?__tS?mMop*tC=E|F?jb&TwwR(0A4{yqi+x>8T4{qS;bu`G(}y%A)_ znBl#&wfG=7pX%!y`Yo%C7#|N|)!RYiza6x_oq(_nSDx)IQSPdg zNBGNo6;f>|jV@lsfY~jvXKWw*abvUix9r)|h)u{3VjH}lG6UMjQ@?!*BYgpsZ_w>* zJ2>VPizb$hMP2aR>09gGSM+!r+*s$ z34>p$d)Oj>Q`l#Vitagou+81gi0{Qb!k#a-TNIQ>`>oV z90SS&)q3YE%Gbe+^rbS)sUB$g4d8vQq;tYp{O$3Vk^zDOr2#M|n@za_9QpVqS{ zXMld8iWO(JBXI(VyeI9$X`DbB6`YoDK(CD9&_6~?Xb*NtcLm-ZxEVxEU-PR3fj?)T z-{CYK!M%0$N7zr;^B*TY$h=*>aI{+#F3l>3w^^(C&KLjSjlp(&xVlj2H$1VbncTR| zH!e7=S-2N=EX>0PY%)GHI;e4(9ZuTz3;c5HOPHU?uct;Sgr`zwC325>2?4={BG_tQ+?Yw_~Wk<^k2YW1aR2N_YgD zTRH(N9L~)%vXSNhniVwWLff_O86vbIklti8Z&Kc$W6VfTOAcsyf>XVzA{PmlLFj1G z0{n32>DbIJ3-)U_BMXeEUs!P8IWB!wY*i#CEm>}u_ zPrgIiX)tba?}NvC9_07?8Zg_{sX+P^KiI@`s&_n=|3)VW}f_TK`O@6T3?&82{_-al@#qV z6-bxSI;nKQS%ee$EYmR);+>D+P_r?N=9?2YLC-2P^?pTX+<(y%h}U!e*Fyu%p;2wg-~V<@33td?(!o zwsn5M-I7~lWBp9t-_cAZd{k4%8Q`GT<6+_ROR(7M2#qZZ2p7Q1W0sQ9w7=|sWdM>! z<?X8|JxRKtu&#C+ zdByMr36qHD%t=S?B+acsx=;4MG&;9>S%jEZPCf|F6+RQ3n-g98NeWFqfTWRgD0jsC z<0{W#NV*I*xU~`b1V7r%q4o4W=%Kd*iDNnWJo2t9>a<5#+9D?AU)&$ftOfAh!P*Pka(wa|s+lnf zjC3%M_xO)I>2%f0c^_Y9Q%~(>xq}lvFscpsaialr`J9iW3FtE)!kJJDi7)~xL(@dv zv)k>9P`}sf9MAXf`Hp3gJF(Nl$<| z{XFCj&(Dx|ys3$DGNjM!3N)Xp`k)IZEvgWw$;6Y4c!>PNK}cyAmGj(t87BO=0M!Gx zLZI_OXifWUFE2;}^0ks(PkniHO-Jk5Sp&V6m(v!i@?uO8_ zIil7#x>iFr9c*&eUSgqHb8*V&TbLhPOuD8Cj&n?6R|Co!)iS(G+W>^0*rsVbTrg@Y zv^tV@%CUZ+kCyrle7MUEX4b(SMsv=lmwBMjo5G)x7LXfReISl&iIm@*JR+yDF+Ol3 zy!2QDuJ94bd*HmLM}@XljBDumW?fhOQ9KT)X8^=WNE)BLE|~+5|McV{j-SVw<6NIm zp`jE}w}eNS(ylq*)c!oDdB?k1emSHKaAV94+8=*}MrfG*?hg3TcME)zY(;J2_nxQV z?CtGHK1@j+w@vsvmS|9~uJ=$<&sB=&U7C*8!5!7R-*>aQ57+Zojw+^A^&y^#lxD}Oe0umwAU{F*Js9clqGkln za?<|7hl0S6|NSkZzx@W`PszjA&D~ZuLfYu+1EifH^m`~YY4(Sa7R4SlF+d!RPM%2D z-(2UZMUP8hS%u#s!k1T|_0XI5>UUT4XMiw)Wi&kp2`v!mVWjm3)p-lZIp{-=0V@WT-NWpo(Ng_qKN1P|sV{I6DBszd+G_smchEViL7Z6V!pN&C;=N)X z$oDYPGYWm4O1zsx-dPs)+&^X`Bi*A}zB*gf0jK;DJs!O3V@~;gffu^2BmZlrE`qPz zrD-Q5UBU#XQ?1dSd^>TWeOHxm6GX3y>ITWT0%0fVgSGhg-8AlMdjbU}2)#DFoi-cN z`Kr|4F#!mNz>oGmrg^~OH73LjjY-=!Q0HFJV6(EDG!`GE-WZVfsH-XEdkYkLO61L~ zntfE8FY)l|ygCg@eF&u5gX9MtVbz~De0qTyIvGx?!{%YS?m)2sq3P#)PetF)_xMDw zjzC(Tbz8m`OA0c9e2ZM*@dX#&4&>yA)rFzaK)MNUmCPXj>Wj;40;QpPTlu=4CP03G z39K!2%tq={h#mq^4TJ8vKPWsL)refaDuGcCROlNG^$P{}pi9$3Jo{o56aI>R*I!`2 z=qUtdxNyQGk#~Ii^o>$fO&5i*P~c}ecauY4Eh8Pwf*ftoH~tVTa7-4OoC|&FVRo0zs`YXp9gozLqkfk$QZH0$=uhi6 zRwwgLUM8f8JJ$Kvu`!!rXTT&zJz2`-2wYrP3hIOZa;gP_~oA%CnIfyBIgYs=;CH2 z2EClyvY_FgIsFK)D{8UszelETmQs%mK+1RF@7R!Ba~%180gnwEBYVeerF-fwJ+NsB zq~%DvHWK;8FT9iJS<~ku;dhSV&ROimrx5J^r-@88 zDU&zF4XHsu{*EAze{VrqAGSN9l_E;Z%>(;Q||+oK}I9Km2{< zC7fpG1f+$fcdB|9=au*;(>%`xZ+G6cD&aYyZ%Sd z2!A)hJDMF}JF6jTg-+mqZr|dkcWSAlbX&pR*uM}#ShV_DF>e>?%oNyy&Aevntq@_AGX~>M|`P1Pg@wES@EmY*!qVJNa zyn5br-X#7Oe!1((CvHBD&TWIS^NV?OPDmu4>{SG5boL*8mTY4)7B6%?0?rlHEK=tj zrc4gwH-nm}J6D>kuWU}@mrp0?{sKVPvmR@CrlESLjg#tD5b zPtvzedN9|^l&$Nw8SF2+V$%~(+3*rmF?L=xT~F;-b^(gh^<gEVZ&5yr1)^VFBM>}I}|QY$cNw>h3oywR-P{| 
zoaCHYEn)v7FZQz8MH*KZY`F_1m_*+dyeZQtYc?w7j^nUtyIMBwRS+bV)1DZQs(8_( z9PxVg3ms*@Aa7;4Er*Y!qt2S8DHF#ohc0L?4h{A{HhzT||@R_UoD?l8DHcM#yV z9ei?QH++&Cf>P`yrZX~zIgM_iIE{{m7sl<;tZ6+qd2l5Di`vd>F5HFE%E|IMm-^~v z+WT9X-VY706u?zlgLi-b5}0LSr?z%Y1izl~tfSK{c8bpb`O{EOZdh+Oc6w*QqiFqK z>q`#o>Ae^f^ELL~W>^=}M_!(@AI#sn;q}vPE+j|2#oeY+VA)_ho?71$9v^9qp7s|w z<&?a$zP74=Xe)d_u^k&Fo&e7YjUcMUYs|BF%lRu6_dzkM%sh)5Sbs=uO8a#TabtxS z3zcK~>Fj#!IDED7F|8r$&u*{2%~q`l!eenQ>uTxrjO+N@zn=WFcMeW}mdsvyFOm7x zWn#?uA~^))@^zU+%Qw}Na3%jue{TqSF=v)z7zOpDCI`7y8G-p_Q zF)`reS6OJfvuW<0=9Rtj&ksd5H}Bp9HB=K|V` zF;${`$1PX&vEv*w#c)zDIiTYxpm`9r0;@IEA{SV^;eBX)&Ww4V2x485JF8x?J9%-G zEajKEz@am-aPNE$=ng$Axg2SPZ>|pI+oH|k!rnog=8+T5K*F$XXxw2agc+{`sx>^D zx`TPuw^OD1Pm$&y;piPU!@xpa3LENjwbLy(ptvc7XYghzoxAPR8Gan?rCzJ19;7v` z|J2?GB|~0d+qS20?dV_Z7Cl$8YkL^#g(rf?yqnYmTmgPTuQ|;ZQ>PyI)A=y=-_!zP zJ<{23pJ%-LQU~^Rojv%r3CDK*+GX{XoZ(@No@|vG%s*rtWr8uaP#=zrx* z_v|gC&xO$O&E*zzpVL{PZMZy|r^lcj6 z-~XOfuKxq)ntfmv-JZj=SK6v|677>cs3UKeAB;zy^j1wvhC^Rp9kuUuJIbq67%}7l zC!AIDrX0f2y;=zFK>O$WSo1B71=oPxm)Tg>Z4hK<4*~~6I=AuVA&GKD_ah~z_@u~fIJ-cOo-2{di?QE@ z1%@f)m$p+Js ztLR=E#c7V}{!aId=MN;owRLka%=j1En!OYP^9R)Bx!T&K4ui%zSr9%o2f{)e{q+}% z+cf(BXu9sWp1wEU#8=6vNM;flB`f-z=Vs3`%ieqMy`@A{BrB1S5hAuY2#g1nlFZ^_{qFUMTX>YodC68{>1kdt9m0a*X{y z^)p4Ca1tfP$8oDAQ{`cEUyS>U2{mF^>Ph@$Io`!$Z06baF|zh^F9WsC&I_E;iw?e- zv`tJ3$8`hjw;z@;kpH~jKTUbl}WUIMN_A40MhStTn z3G**ASA1fZ_8z={>r3euxryHVtSBS;Jd@}LxfAcs zyFBzHb?KEXQ5TB$cvCmz1w~HFlG)k3cugJhi>$!s<~)@*!XL_Fy^9;FZxpZ2oR~m6 zJ9Ho%TPyR}lE$^h^Fh!_!v;(Jimij8^uWx2Vf^`)r+AR50s{wS2m3bM zxxby37x9y<`&HqqZsv?}(PQ%x25^L5pBhd}&W7n9)9X_A5{G50?I%Q)n}->E&l?s_ z;22)a>A%W|U0*lLfyrz1xbd~XS^iY(b)>w$yQacTx|tKA=3mSDFGyg#^v_~9-^$t{ zz-z#|*96>>m;<(>bg^#P9!8CG|M|kmJ72u<{r0v-?R_)FoK2Mpn8zu%ZM90BY*^J` z-=8J%{^48^GWQhAv!CRcdbP!Ts|BXa3q^doisvS9m-eWaA=EfmOqfF8MM8ban>kh@ zWJ83w>J}tXf21%g{90dOjDE@Qu+P=vLYeZsjC=hHSjW&3wDi;-f~FPirzPvhJWULaW#F+;V`)0i#c1dIgWy->{Y`x+`BS*8*_^2J z6?g7=$epnUCEgb#7tsj-NdEZXY{!Xv(9^x{5On)`rrhC!2UPbe%S&<6fG3G(vwo zdAEcn;jeR!NMIKOx7qLUSbCaZr5!UW3FtMtkWo`ZuTkBWc&FU+;s&@uVYsM#x0F$@ z$VZ{{@#ZD##b@l@?Ahg=yz;)S5J|~$=D+`F{Wd3B5pi5@_c;!ZJ1w9CC@!NXp?`&% zi$XnH3Z?nlv~55Ut2RpXHv4Aph39!JQL6-;RGv+ON5qoGRh6$GhnXMgN24+d|LOO< zg$(^F9_3aksP{b6ZawMWWU(*ppy|o-GHh19j207T!o1`Cj^OW3)o+^$2fP3#=16d- z0522pO7xkqj2ka(0L|~m{eolAkIUrm)@^~Wr6u}I2bc4oX0M@3EE#KO^!rj@{g=^8 zgf$kx0^eZ^3-RlvA>=g1hJnqDe$}C~Xw0_OT-dCPNzI>}x=nGSJZ#&)+WVV+IQwqIIlt+F){E&dZ_x#kL5W)@C%w zv`Hr5nXlv_J1OJNdmSEvijMfI*SzN9)46F2TEF-b`^30V+@BK4cNv4ztLmsd9rMBP zeFWU(Q0!~i8T(W!9U5Ax5m(-tOG*X&JdD>9(w>)NBhWZ{&)#0IDWB5m-b>PZW;P*p; z- z_%vhOq_h|Mr9l6M;~651K?mVJ z1-!k{vRq24!&)MR@yaJEFJPVgQ7G-af9nX9!Ev2Z9+c!=i&3N6(S&-)sLTv_15=dO zA$ZgKazwxTJmf@%>B#Q^-0R{RRyi1WO;*iq!GVsY7}`XK<|XAB!0S3Ziqbm3ib-TF zt!F&jyhG_SrP<-_HjvUC(6hAa&SL(sst2n)5cSLhr~WI@9q{=kt0SEVx>mfs{gt56 zaIgIom7{QaKr4!P-A=%xpdLoj#d;%Q>PN#ChAQ2zG&Z&H35Jh-Y}$C?C@CE1>;IUqRdzMH$;yX{gKr`kjCUT<>xu-|g{H?O=_AVXCAN+nLd6DpJI zrFpTcMKu>HUjY6VWRUQ!6o7ldzpbu9-W_|(QIp<^(aU;?JyzF9X+mUd`oyS363170 zLMrN3^)sD67)#layOnpOJ8zCKbch7kOO@lnXOme3_F8Y~ud-7@Z4oe8TPRKRbldYtX-}JXooc z_S)SSSpDa9Wg0hQrTndRR;rHz~5Y#Qrkf;jQ*DC z=?}Ti=-#xa?_W{W!(LPPHxixq%e|#~9x4tjQMry3CS3fNM{x%T-N0#V8qvQDa zjBGj;m&3(djhB62k4LsvO9V_CDIN{m%0<7P<@UGA85jqJO}k2O){oHdgly+)7cX)M zzYs3gt>u#;W+FY~0}Y(hN|bOyCNa-0HM?{%#vh|Wubm`@&*9Rm22tVQhkPNKCf$R z3&MNt2QQ%!^@=g(OMHAemvA0b)qD|~Ej-CqCD+r1fJKD)CbwG0$j3LAFlW^0O(eAp z4HcK>I2u_WmPwT06{=4Xl+;?Q4EH&n#aQNGUnjacZ!PWh#LsSYEE1xMp z>8pM4J6xm7F4M)A%W3n+bkkD*+O#pVIpw({-*Fv8(I-ma9;^ps{@M|HUT#VC|Fcg9 zt-irU2b3rC-QVP*=u`R^y9H|gC_C#fSzVhc2XE`iI3F#ppY*vr+e#j>xXG1rwg@%P 
z{R6OP!HG03)oeHSS!>0;Jx)@5xqQO7`C#&L`iARIC;WnGcAH0>UiX%1@{UEOv5OAL zeU*-g*Aq_Temp}g`nyxA`C51@SavUbm9xuK6!>mYKW~Wm{Hz`AUNJ%U@f}Has>j4&kf-1O)9U;e#F%R-tGUvoozbRIp)+V) zP!%oHVJcq;u;8&tKPk-Sw`}ZiNFTlU1=nfNhBh`Rqy37F;*6LjI5(FQ0go>6-f$+2 zQ;s@gO*6lQ>X-`}mYJ=N$rcV5=q4y9_7n!Cq)DZ*aYg zeR{{0)wz7-#oX2J5IO#7%^@pSaE;DoD5mi}ajfPDq1G$RFIMi&h*+83p?vdF87mo39jer%u~*82*^kX<@L+BYrB`<87W z^95e=?jlRt`+WPBQY0H`E%N9u2D^`n>rkk`EoH zT_bWZU+XCNw>4SVn+*Sv4ysmk8aA5Mc)T_h)tVoAPic=TNZ<%tUwtV~O<7B7ZgyQu z7V7wS%BE52-wm~?mmQ6NK|@TfoJ(+cy}umYjYX62vE2WdnMr-rvC^_IpO9U z0+vzBaxQdfM;~5;yaC_E?p%zVk2O=ZECSkBp-xo~NWbW8o_cu=1x{&ApF8cRMhg?U zf51U~;hLdbdt07(Q8Ao5b^0!CzIY4Z7;V5_u#fVI!|z)dy*-*y7yoq8&g=>QtUrKX zWxI>#l`3!{pFMnGUL#f466ep;T_%uk#60=6*n5e!=cJ>P#OyW$=@RxbK33~TDr#E- zU$ED=J=xznMD^-AQ2F+{{1KOKQsdj={3`Wk*lba|*cn1S6Y7@-&Dthb2W)6`5_=(H zzU7i9S-Mkxv|Z=x z9XyjZTpUVM^6`LcmsfJV-41%ZB8&Rgj3nF7ZrpA15t?xGmwsxKpHLY3ulptj2N>>i z->1&a?kVS#3sXJBC#P0t9FL!dC-B8mU8!};R>E>#B!Md=>Ofyt;+#-;0W7D5Yo~G3 zuXT9gnEkY@=opHPFQx$l#0C4O!oEon@wdl5QU1v0f_|Me*CkR zc4uF2;89c5%qCJevL^R1Wu#xCQ&UQ_(+W=woTTQ}2X#S9p7fCS_Eaa-xjgGpk`nfu z5U3C1ab$)G^{jzE==R6jntjt4f&M19H8%1_=5AgQQ;8no8p9_%U247{Spuh|;*x7y zLb*!J6*)6(i%clJoz;8-_vrQbgUotu{l0I!+}O~ael*Av9=3ko-KtW%ra^SnyakW`VB-3{@vKN(EEgS^MAKb9 z=!&ar&#b9!>VJ{WY;fX`sCcUGZl$HJilJef9YpEhM>r>OF=aY@kq>`e60Pq(prQf7DdfYp}gJEfrj^gCJr3f$anU?F*O;qLj?Ux z;qDtp6ZDF}`WDp9uZ$k_JG~*jPs@is$dJ%G#{RJXWYzBTxZLA!jGmDVdzLoFysv?E z2$BI0S++?kLSJqB>06vf5%dxP16X0fy4f4StNqD2!j=Zx9j9@nHLBwI8hsH#`zu!E zi8n?Yb6fce;2S4bh$eg&S6y3Pewi7-cCG)h%LHew*_Sxk&}oKr(!UUD4tNy7+Z`;S zKavSN0bNk2fM?J4%o6|JG~$L%-#IrYQ*JAR>~?ktx87HlfIC|A&$usmfR%xM(ZI#h zIpw;lCjl-ak1dzwsEoC=SZ^tSJAAyuc2*n+9_4PEet|!d3G2<33XkL`5l84{+mq^h zM9cE^q#9>PRAsGa%rR~{(9r<*DUO$F&ZqvoXKFII5xp6=#Rpg|!NXi;_%co}RNNTv zSXrm{t!bNJz))dY&qrI1ItF?J<$~?>FCGQLsruT z>Vnqpz&`1bn+mjH%HI_n)3^_1E%uhsX!?JHe5vX3K1%0NqF78@gKJZ_`_2+v$S?2X zo~*w+#rWMBvP1Qu((KI+4w2pX){;Bizv~v<$7;q+E-t5fAHVSIgRSJOr6sg+dq+~* zv=n-0wt;(2SVT^4*-~km2iFq`=fds(U1d?y%*f7~&l?-86VP4akkvP(^W+BG5N`fC zLiU(w&)`x5{z|Kzr^VrnCa~r+Il66Wurg+RK3tp;6 zgJ!w1>W7?LeSLbyG&0=Mu2Fcs`Lu0sXI9!2bHuCXKPp)38OvKp@H0&@+48OteGOnP zr^RD$z4Pt(^hTXt-AgsSy62++3Poi&V$J#Drzw3n#yqPb|kbQihQnIksUFT|~~Ckprh zQkXh?z%-g!>5_y$p}+6$^0h66Nb$txzPq{k;RQ;gi1O`ChEL)pY0|P%LU=g#-M(S^ zI)E?A^nfm8P<5y+HXditiJn zdLN&kMZhl6&U^}0tKrChPA5n+JS+X{Oa^z)`4692#6KELRNh2Dy9v~wZqwgJht?$T zAYFn7P>S*NX#)oW3gX! zZ+M}hvdqdRs`n{+!6c>AFxNfkEUvZKB?rL6Jd*EEj1m?Oej@fl0K*TU_E&+w_b5-q z$3_=2ZY}9a?MELaIU$7tITh3*zhz{H%_J@VRQ}!jV`bP=# zp@lyCDZDl38x49#uIhft zC-LfLQeGJvl0A#AqHdEL!UI$bjISeI9Q5b*8le9xtf*;?C{_-N=Dcd+_iE zbsBxK`zL_&;+>f@@(mk$QPGy+yG=zd9hRt79U4@s@u3&#e76K%;b{VPd9dsH<5=6Z zSkxdn=-E=P;yId1?5hjEw~l}H`J^~gzF)nK*5o!Ya&F~Ng@-pu@$c_Cv8t9$D?Mr) zSN)t8|5scc{KLm?aWTqVaF^ld#D(1QVkGWshrTA{7*uKUC0V!LcGass@E;~<8HMi! 
z`trtrGXxBhz&y%0O$FL(cwDgJbjl2j#$FX(jJ_l2SRI_q3)g<-@WLrV={IOvn)ahT zsd1`|B+uOg`qGc#d*CHs0aMn{{Bd3A=cYsQS4I}Eos*0U z3HWPu9Cl9W%F0(EkCN~^rq!>`nED%qz+d4K>qY1dp}4PTra8|HbHTf{D+u@r3G^SP93uoZQQo`HfYrL1>Safk#@?P3*=S$F|&jE zVO)CyN7K(DF(#$qs#qp~3+fZM%#)h`zG5d2($9_X8v%65w*lxEORZhi-^{U=p%268Yly#G*@ zUFhHg_@1k%ogmuu`Gq`PI9{KqH8GBkSw~F0yFb4XhP7LrrdcD!J0nt%=x zH$PwIUY53+zx!9b|Jl!H$Ed#?8y^AhOWK1!6Bw8$x>PByYOVmQa2!&)>vkV=rOiz6 z@49o9odn;)D^CZ?_k1u~0@Wk33Q^+y;lT_YPVGBIA_-5lTpf@!54}N2LsUS}% z`_sa>9omM<{7T^RkDfE|-Sl{9lsbpT7fW3)$bo>ZhSKoJC3y0+b4nX9_zio9)+^Ab zDknrPAXl!p)6uWo_k=rD$NPSimqczW@LhDX|6%@WdtcR~%2D(R?Gg!jjSinFrsCe1 zez-m|A+aIviR@3xUtx|_zQN!nDzoy0a9!ttoHIyTmP#zh8~djDFmeg#vzo{q|B3Q` zeiSsS7_z!A)ZzCg9hyKMzPq20HOpGFaP=E}2mF*Zy13(+g~Ae8A&!nI!|iVG<%(_B z^Fp&sJ)e8HN1Wmi4Z)34J4hU;M1HXR5*gm2aRHB=AN2>t2DOW3iXS z5<)#oXg_FO3vS)=3_SJ*M*kz{nPGUp-Hg1ly6h3#%Q%u1p(6_s*?Sucl@maRiFqwH zO1GLjOg$%9kjglam$BoY4050MPhQ$_PXH@4Xim=X+^Vu1o;G;14oxkghvkbycn35x zw{d%O>2D8Df~GaWH&dMkXO%DD5$787j#2aJg#T)2-aK9D8~8Ggetlb(bxq-}N7plO zp5R5f{b+B_-?&xPi##?ul)iK*CLC^VC9E4OZd5+D;{AF`cgy6qcll#A8)NQ~GVJ5t zTn_zJgf4V+HadqaCSzY=+;?|V<~?Z3!JRs=QF$`Y*j1LIJ`A9d+lq-F%|9}JmM-J# z3(psYwZnzJ$y;TJRvcQyoofH#?_(n9(@#g5x?iX03Dr1qZy;Y-pFx95x8oNZBdKre zQ0}&Ut90G8nMaQ^GvY5M@x$VooN7N+cIZ}?jebdd)a5iu&y##8;y&#tvYz*iP1BRT z>r-y}MV?+K2;5t_U|ePm>u}u~_GJFXLhE1Ei7aR8Trw#cdCs(ngCPxbSJTad` zUg@z z4I-ZiNri8hGL`==`9SPmF>gUp@!k25sa|9_z3&pG_aC25&6@j)Nk$m?JXoXu8%;cH zw9Ta?;6;{y+{5zW;|}d0dlgu&u$l&OXtV$#gewQX>qf^ z>`-(*7m4X1ik>@7_ZO|Als89cXZd5oW_}s1Thl~w@lO+;v1N?--Dv{1ed%Ebd zGfh0WW-cv=uEE2LyL0zT13Be-=ss`P9ohmHFz+l>B? zFb8Db?;W-8i~F_a?-EzORWw#kKdWk%?;V&#jc0A-%vR?Sz{pmu21 zai8Fi<}|a%3ECL8Rrd&rWP;MC1#d;hj6`}?B+=wjcP7@q$_X5Yt5hwlE!*5(EN(U* z^M_{^+ZGjD2ijBKtSj6#?Frt)v6aJ2{pi)hvg}vN&S*0_jAmyo=K0TpWySYzxmR2e zjWoY2TMxO%^F5Qr&|WunwT>0%S`hj~pw>-gCs>I;$W5ou@}ROsm$QBZ_c~5(VM^I> zPt@$l->Q6>Z(mAlZM~8fiGAq97J61|cg^`yCq^&vILF0wejeU$7m9rK zN>w`j{Rtl_R+X0T2odUg`X*dUMK7g}Pv1?UF`=B@q%c=pxkUsm?oDsTcHwKm-RW?P z15_+2SbR8I)L1ogCP%ipCpy=!C6hkyl&D3YuUXZpdXZ5IXGH%K6)7>fJSG2}A#I+P zphI<5@%qP`Xy})0da$;x;S(WDZ+ad~9X0zS>c1(k?U14;Ccfu7Ym#tbybTpe`o{O( z@74dxwc^+9+X8(})}!B;Zq>R+t>V{;;jU+Cdc;LGwZL;7=T7SfhHVxtZ*SvmXYf4E z`e^-SvIh-(Uz8?S36`sGRx(a`Or&zTZ)iM*d5(ntW zT=@jn^e0F5nN43Nds53uHEG8&gZxeYrt8bBv@*-mDQ(C+`CDA&5x%Qw$RjD8EACbp zV??&;!=uXY;Ngvi)6iB&#I7JOb$p)1DLnFBYC#<;{J6D4%ecyi9~`4T*$;$e^?{`3e|y)KT>iJUM%s$G5!4P9GYYfxv153rUoM>1tg zm~=mmZIp*(it-1G>);8>9d6F4g)GtgOXR9=#k9O~RvK_xjgQRRT^2Yuo*v;Z=YRYn z@ORn0XnDeCWMBB$=%Pt-*0brXFyivPYrOKT3+3CdBLC&pMBOPT1p3aTFx!0JS?Di! 
z19+usg>vt_k>D)(?0Pxy#T2>eeQZio=Y_2PJUl*uw_g7vl8(OBwRQt&n`=)~tN7An z+jbeN^+CM=k2=Yil`W{;y5>g9#pmUhhEqh9#?_=l(Z77q;S5)`h!IO~J>^z0BQ?~x zX#S(5UTscY3aq`1rnJXBxjXN&{d#+@F|`EnBZ{UxZ^Kw;2EQ3)CxnPnmn-|esj^*k z`BQ;yv2QXsN?z@=Uc}ekDZ5Wt!{@9Xiw*zfNMJXi4@A2iFZ3q=4)T=PSZYw`DPhe- z;l(LD&HaWf*I_LeUfh^hPH&?(>VHKf+}%Si6?zeHjU5LU(k?j_(|{vle-#IDCg~_o z!1c~5i>?dwo}L|ci0NuZ0lq7oX%@Yn>b>nDx|Y-^F2GT1JKvqM)>#;T!go{J$5q_x z_eH*c(U*2U{3SMgC4MvZ8l4(lOU(Gxsz4Wjn}E^gWEplqYE#e6 z{CMeLZvG>R-^UsZ{^B)0*uN{$mhc-_Ie&;&UWI>=ab`>+XpO*YMP z3i<@RL8>;uS*HFKMhnc1(O_&GZQoWAYto8h7k%L?fup(HoXOjuX08#_;Y>c4`)&m6Pzv@B+Gg}xn06|W*s z@X%jN<#GEUTJguzs9oMs`_X(7xZ1`T({d=!9P>aZj)}kMK~=)Q<;W73J5Av0aodF1 zvzx3w?_}8zbF@dVv&Mt=J}g3InmQTaI?*%uuz-GG#RrZz^EhGSL=G9fffH(ma^EIB z`S<-08e~zEfJL%SqQ7`+?BV#em3eQChDHjlB-EC%tl4r?^+&r!g{t{V|7aygRX0#u zBD~TjUey$y{M=2s-0CW$b`-XYv3{#4I<+gU%jm3ll${@s7vt}orHx?auh&V$#_C+_r2TE;{2Cf9ss4>Oi7K zCGYITtzI{WevD^uDk&^fI<0a-8iB9ntjV@?2iKUZ@7^Tp51U9I9J*6l_i)91hV_qy z(yZZrslF@ia2AbC1cnY8sjyn2uh46`1fG{$Z!^n@c{73UIpk-X(sjM7`G z$MAlZz8sHx>Y%ZNXV1yB=y;6fdt}ahaoy|z``Ms3Ql=bxzL0>BEWm;nMQRzmZdH_0w;~)7$r~PGBTSd#p zQKwOj1=doU?`y-rCssA#>D55)xs#m=j3>;Op7QUe{Q09L?Z1{P2L)Ia;Et*TtRZl_ zs9_adm>#^e5yz{?g9FxZc36GI;RTvKvSS!4OuY5aFjxRbC-Nu@LwpNQ*-TMptUfPy$^9l zHN2O)T_pXu8cM)O@|&@wK-=d}x-a_%%;0%5`xdMtG>!yEN${_zj-s^q6euT~aE2qTe4*;5|0qunRiSQk^R)ZkTqu7Y%sX*VLd~x@=??LK!(*sa%KB zTFcw65?YX}+^8*HyfC4+|46@5kqqwQMXv+cGQ1boDTTsvq7>(G!rfYm$Bb^4@5QY$ zEJwccp;_D*b61>t1~*{f7lq@RNrJB%bq=xSZh_~xML!$a`P&2z>o}Ld4H9_5j(ZP^ z1F`B^`%D4I$d&#pzFC;1hkaoXk1m-OR>`bii6NkBGvv2|Fu4iJ#Z7{ zqp7DQ>Og!e4WU3ucFWf#f(k< zFV$KU_@U=PCs|=O`bBs@_omEB`;;buhhKvGu6(9{BcOMqLYsts^8d)c)njL%S9qV##3g&@?C-qGp= zUmBdMIG^eas0>Z9M(%n0lg2q(qh^o5L)9~F@*g4XpEQw`nw(|jDJPAs!#(Qth9_Gp zn?*Yr%Cp+nca~~Cp^K=~ydJp1_C;|o_|lnQZ+#%9v~Ol8zx8xn6;0{A{m<|4rRhC^ zhc%A_0|=Q0!?TKsnfdz9cAFVlgKYmjjRSmsN!fh?Ej$HYU$9qCs9A^uqk;)_LaM*s z$751=AWG{BgX9~qN|0ckrQ5J`~SW?j)s$9n-E6G@yVcm&O15(>oj*s%n*?`@k!6 z82G@3?G4?o+D2&X=fdNHPCb2!6lmER*YCjx_)snLw(1Xb?L7Y7>4Q|5h8marqrQO8 z1qa+#-jF?81{l*$-xa_uZZfnsLszg)Z0s*yhmIMv!V-ls!eCF9=v+8S~Qt!vs~ z%BfIRQ@O*rQN6`Ax8>j6q%t`4A}cMQbabrJ;NZHc64=A18pN^rn?-#0{%c;IfZMEU&+X;#_4`_jkSqUTyjWH*GhGEf^bv0q&oL{x({PO>{a2 z*Y9R7r`d0+7U=)VDGvpX!RNXOM&75PM_A<|g)29q5*=~>S*xlBaFy>_TqNitL)E*| zl;C1I<&h_#QDk0bBK?VP=aZN?p6Y#G%e8F;(6F-t4V1b zjLFzoH8E9Xrkn9@r>H0gt)6y}qwb|KFv|4#bP;3G-^mi019a?Y{@^|h_!=YCIc?h1 zjQT#d70=f^;!{@=_`vDcV)uYkJZIl4sq)bCmjj?(;#m2U%e8B(9uv^Rd@sRF`Fm2n zX70Zh`ue#E@D=;`glHmErg&(UFU|irRg_IlrYqQ640Eod-_(5T%IB&z#rUP_7kFwd zxqVmU!WQ(#u8=s=qcg3(xmj-b%BJMEg$&GRf!<&H`+kbT6#rY-Y2sGa;Z0Tji;U&x zBzTv8S3dylKve8|9eQcLRHCm{z9yrKdT`0JlT{9*75M0tHw!XIl`kOsH9FVdLnD*_ zn%*v{2;X*qS6949$bV^9BP&L45O|blJ^Mx(ruWF2CSk1;c-)lE%C8n^d+;TXv&-W8 zU9RcK=tTFT8+q&Q6MQ1PY~zRi){JaTLtaFROVB?IuS}l#J7~}dSBczOyvXk*fnoa5 ztGNaEInJ&JzH7YF-2yp}0IsS&CB;F=2QZ)M*ynJUxSL%}d%r(N=}wIEvV=w>^qoXT zk_tZMmjeO>G9cOZ=@#~1zC%`dd4VpSt;hexFIL`PWt8Gi{CSo6O87=nbG<;m#M&+- z@VLs4MGJXb_PA-spKe9UXJ*b~``hojfAC25%uC@RPPnd7e1$3hcWEwBH<7K+JL_Zb z`O@q5QM5Iy3t8W~Le~=4n=%IvBaakUEmPl4&xWp`4sDC`mO7=3fXrC2rOqmCjnZ#<2j0~YZvR^N`koh)>%P&O7I!nA^oeU6elM_buCahaI%l!h&l*(9!de?L z*M%%E_GfFKGrTdPH-AdpC+{@MPTiQQ3(M8#O~vxI`pjQ@M7M8!1^er+mlZBAm3qzz zc8iK-r!P`Y>Ai$P-eOOtJ@>@Y?jc$U>vl9`SCD?Km>oa0d_b4By%B4NMB+ZelQMsx zErmsv<=hUp$a3T>zK%WPe1`rei?{!2m&V-YRcpRU%q6vOS}RMx$>gXbTR3~{9#PkN zfPTG8PqE8mK27OeSc^@v<{vlm=})_^t63fB9YCU32 z!(J`nQG;E1LSQyE6^r?1Xbrut^~1(LYDro<#LoD0cc-w5!ZXJ`+t8`aUG?m%mGKVg zk62$jO|8T0_9dzNp~qZtU%!-x&g;dkZvjSD4{P$>s$8zf)JiB{i zHus6xCyMtA6XlxC;K(~AWGk1##`9ghxM7PeG;3xYPTvu~0`VxC^zbmR8 zJo%QsS1KfZw;q>+rX&l`^)uP@F@k!9Xne3sbAkEgi>KG}ac2u{!R7Y6s^1{C!F5;N 
z|F!+Bc^pt%nSkd_Mt*Cz7cIg@3f=~4K7iS%7^1Ca8=zi)8n)BSssOEEzOx=Pp z-*f5Ps;Q=}Q?SQd>p9%{-7;A${gkvnI*YD9_Rwtb=)X;^-?DJgE$mk~jH);-r0CP+TdBp;rt)D| z502{^M5jg=qSBNQ3Vl_9PV~Ph@5Ek_XBP+Z*;nV;-p(Y7|FoyS6()1SL?@o)c2w+N z_(Du?<6vFzNT6?fS&vDiYX9~&^!t}A2);PC3c`S?+Gv*A|f<{Ck_ua;w z(%(~2zv?vftsf7_9>oVwj^$rP{Hb!A1Wq>#kuwg>Ak?4uJg*}~z4P{&a$ZqLs`^c>dsPE_t~vOQ|^h9J}=jm1_Oy z+7qsJYZ$(xs+ga@n_C%Can?2qx4vDZ{{P4q(bal=rH@N%pn_j zO?dU&%pDh2HGm(y+S8!%DObdsj6A+RuQ3fd?5$&cx$4C)RQK**aX+ap7fSrVd2Q!W zWB*cw`lUIkRmFtB-sIP$B6x8L7um5#JUeGaUpv;MQQoJ;z2vX*$B?VS?YtM`XEi3C zl6gRGzWj*p{$3~t?P&!+e@IM?yT%W@CdtkxcjMmNMXc6i_l9S5&gG$Kc5EoMYyL#_ zsu;5k?}S+DK;Cn_8FMc^0{8K=b0Jg?@89USFo9-&nP=i4Dc=|VE{$$|xLm_i=*4oT zhffyq+=z5NsCXBe*%Z&MeYhk)cNt$Wr$)p?cB;Hl-s*jxA38hHnU)=?Ol~2cuc1vC z$I<)eJ7}n7U9Hv2|7^shca>9tky6#?%UMV0O8ic)wy85)ybYsGHJ)=Rle0iin$&z_ z-Z|8zKF-z5@LJP=_r*tl>O06sH!h}hp2 zIp~FYRCmiOz5Ag-1guT@u)2tbzcVmMKYM?*zR#+#wzC8F@cFw=3_Kaqi)5I zf4#X>+zML#Z6~46grB{yj`8UOMLF&E@cCTs+C}+cJN69I4j+deku` zm9E?&x;Vf^{T?|!bQE##my>Vy4Was|wFj~4d2o{!IQgC^z2ug#e{z?0E$S%vr(+2;W!u|RduJNEddHqb}&7;TYs8jv(k*8v9 ztSdC?#ezBm&QtDz_KdaXK)d1M@Hc~Uv?c7-uaYr-uQRDN1do~KpIU>pzAD2q_L1s& zyWd%&u(vtyuBnr%k4M!m;T*6#=~bG<{7g7>xWqkYF2yytvbE5v_ zGorNXK;G%nj&H4c0RC)6#Zrp-etLYGo=f2WEnogEG9VcY4) zie#VBJKpK22mX~aMuPW*?mt?N8oZ6gpcK^`Jh97K#@JYCi!B#6lH&H^i%V%&E*2(L zua8~B=*--wz|2n+SIq_d5z1lM=NA}>p1VgmcT1`o5%q?z6z_gD1HL2xOODEsk$BH! zPNKxTlRCH1#qL@gpO3OwxCeu`h2xyc3@+kHC!1=UN59e01E$yATN~Ai{SqyfrpcHD z2MzqCxQM%#>7khgoR;eN&=q{_Y>bcJy_K zzK=FmNCp?5(t{^1BJV?fe4wm{_NS!gGxp^aU|LIQq;`f5voNMNTF;3Ut*LVH;%sf6 zOdGqkf_8bx%kb>A(ujk~O=QK>c_l`QV~tvJEqix{4l+F(T3qoUN4fbEY6Lng8@YF~ z(t0Lvw*1rzSxey=lBb38kJ#rXU@Y%BZDx$M50u=l8li{i>fRg?BygRjLn8JX37{UG z-b$qhffEA#q_~Xnv(ly}{9UfIxh!ftxXrWteI;}e_+>u5tG<4E5wk{f`(J=F0?tnDPdi7=uLTX zxw-Lf-9_oQ&0b&)eAK+{EqsBWw;ZaM{{GLDZtFu&zVG5ePkW>0)`2Pd3=eL^yI=hF8zIh>M4Esx@FvYSQn{|of&zCMZ|hpexY+ zKwL)9?majBure-I}TKYC-X#4-;m1B^y|NjF?Q(EMW$*#edy}5k4mfQbN{@TBZj?{ zkre8qbOkV$p_>eRj%)Xr%pLYx8Nm}R*sE?61HMZP?cB@=7`}r8JaPWDgw!2waZHhA zG%L3)t!=bVw(7M}c`IKTf|_j{gXa?WVej_`oX|2w2kt8`AuyMWb7uHRQ7ibmT>H;S z^`a?gN``K|Nf$rPwlbhU1V2w^%rSIow%$3+kw4G7Ea}={u2t#2>V*Ovzq+?CjW1CG zd#ikxE#4ks@SMmW+l+0?2b)T059r`Kf4;CGU!KVC&d_Bde9Uf2?Cva--iHq7&{vyC zY0RO0ztNKOpG=h(q)K>erHKT1$QZM&24ilJO};SMAN^^9&X%DEifD>Ivm2CF=TCX@ zcqQ4T0ir|O73}&WP)lm^NC#$^@*C7!<)Cp&x;`~v9HvYQ4B9B;1$%G;W}!Tm>ye36c`~HzS)HNP<*DV++pomOAXj24wOxT2KMEW)24tMUobQ#&2p%rG>E?1Q&XHF zmaoHo-N}U++j;PRufGyBhPZNZiFEn!)O5c>Uv?WNh1bc|DuSW)A{X(@Kk6!4g5p!wW>Y@JgWp=>#7HU zIjrVFtq*F-YD_ctvEhQ=?q3mSUbI<37P>~RlnMii_%=&#!zdw3?zv+4IbMHBy z&wD-ho-_1FFk~fau=V@toO+Rc|7;7feUVHl#X{2$#v@KgCI@x2XmjaMGW``%o@0yhitj)%_{57QvrN zLaSx4-p&{UH>?rZp`{E(BF_!ErL%EYz<#=I_bR5cd`>k-{<)pV5`e}9Ci%KzM1wRS zUqtWyRl~u%Cpigj$Lo4YFtzyy?ZWKgFsLrwcee30A35=~@IC@dc(UCd*mnL7w;b1v zlgl;FU4QfpmB_`6fQU(QTOA@Sn6vRk`eIFOH`nPQ_r$zT`1$ z%j7A!dtM+CX98&W)?e#zf> zyGD}>%|F66g=ZT-Bj04M{yFMKwN?)t2CJZ*za51pBF{!zSRd!!ERxLA{lR^_dsz*>WV~q_V%rxx_O#Lj{Vk- zQ`V(jALh@-Jnn<%;+`r0@wMFpqJz|!vu~&~-k>NZ&+lal1@R2D`gh2~)CBk^z9XFWrk5llpiwoCuoDKAMCi;|oAdnVR z$ty$k{PQR>>J|pp3e_9=vYm|bY@iH}Jjgi8T#X>1)fb?el0`kvT{w<1lN`qW^hJ}c zM@iF{;-u~!pl5m<(zvSRQ-n5wzihCbI8jFFfnC97DTVs0)TJg06F~FuYU- z9fo?qf89Gm$#Q#y+21uY&W)3|r`^yt@)`@%4VvN6_^WuX>tF6x=?qIUj>Fr`q3qd{ zjr{WJ3b?drYPVNmL;zl(7_NrnxH${M{ z-6t&Y9}hcY-|$VnOu#+W5bm6qu11(#{%EmmLw~@~eJmwC96rX? 
z6>|i1UU+e|zS{qLl5)(V7~-S#VB+g|IO$x#njPvO`+PH0Bck#ZqoOPbk%KYT{;K5H z_#XOv+r&NMnnKyx`tW1^WqvDw_I)VrN8?cevpd&;f4RG$tafh}S5D6nw)(|r?C?>Y zUSRWXCx)zc;q+R*D4-C{#$SU-x7|wbA*c8lvqFVFn-99@0UZPEVBCOo8k?6iR|+2z z=77h>B+`5au#rdFz}1%x<$aNvSooj5ywL3jj_A1^j#|+@L4z*<>!9K=m(NtofpjfR z9~j^`4$3xM#9CR$c(Bm{?z4C; zeg0Csxv4%soiQ34wB3Sf24|Vg$A8lKb}6{Esig6@{sP;lw}kl(PIH@X9+33y2(vjh z3`K8L*krML0`x6v@+Lj(4uzYkoxX#^J@ykN8c(_q(wjoG6CAL*C{4Et8_^}Zdc=Ko=xTgS5d zh8SY`(lIc1|8|%fxs`2y-c0>7GK3pU*aH(8@p1ZiH!M1+JIINGZy1=83>UG}`($D>YHVWy>qO6)8OkSGF zMs`US^8}G$H>8iWf4lzfoAl$!rkbt<^QrciWr9rVT3O zq21!3iFN~<)cO(9=S#ESsq~(1IQyNj6(+SKuD&L^dqP5bBKR0p0dJ&Pi&suC{!?*)HB+x{icTlJ1+j?5F)srS_MXJaMCi zhH3@owrQpgaJTU(8Op2I%ih<8+wmsJ3t~X$R^aK8HSM zh;Q!dVAd2T&C-KfsOMY?Gr zVjV92ahKEhB4#@;iSLY^$HE(3K+-z=;g;@vbZj1PVeWxCI;E&tH9?+wQ&yO-ArQy%$_Mpn zz7)Ez|39E{2k*7M5S?>`)8A2yM|^a!z(cHBXoc8!I#f@2AZiZ|{#VB8cbp5v8S0;5 z-=JxyG@Kz%#BR^8;o23=xR~oBZCtsx>v73G>KBUF3(V~~%n&!~W`gg-aY*Bfmt}h- zT+SBwLZ6M^u3hnZ)5k35rvwv@4>H-}$CDq^s^#V#KGgyvBA0~7vhi5LvU3d?>c5I+#Rdj>ma9W$?;|2|lFTSg^$3|vp zK>a37SYVFu!WjE!jlyMiIZR+@=bj7EW6W@@y!)58@G(}Y7A1js6BcYo`%&kK&#a-5 zSE(ep_=JlqwAQTz!WJaF#wQ=fV;>`3^@A1dg=RwE7oWVXEOy?DyXpBcpQT%I;%r*C zV|g&ucmOl7Dka@KgcG;ph2|$jzX{HQe*-KDCthnR6E9+l%{I__xkLBzQmVfVR2#41 zbJ#BQD^3@EPvccz?k^3;t=@VnjVV9>Vl-da-wbnSj7KpCq%+kgCtJba&_UR8INjey zYml4T*H%dRoPQVOC$W^X ztRPL(O7JalWElP%Kwe|jP~6#AN4{vzIO%4#c#RiyUY?DaCmtHki^De}{lCy`Jm|U?v_5x| zYT6!~ti1$O|D;_EfP4Z{ZNPcokw|_5%tL!1=>wIx7Tn3duh9-xQ+-OraoL}Ub1dS9 z8DHqOQhYAyUU%4E&k5>o`i7$_D)9{MDB3C;R*mM80{dfKEz4S z+6c^N3x>W2f9qwEQFs;cfIZbrFluABk+z!1sNZVzgV6P&XGlXa!YfAlff0A|&2PM5 z(Rp)OaMH>x&l%NSP0ia!t-)4<(rV`9lZmAq)u%o7^KT$m-dzvWD_md_jVsm8HzjY- z4$?~1?E4b>F0)}MTi(izk=~NLT4+#z+D>lPA{Li)JdGJ6=-$zd7P6s5Ie)V(hI;t} zeTJRTwfN=yWZZaRmO4F%_9Sih6Gm_`q3j&pD?USqYB^AI|eY_g(0AEa*%1>nHUT5zH(c?ExhyTK!^&yN7$|-lXfG0 zHys*OoI{7Y@uI$1xpoHKiQFADj6B>#X%c3)s?er{LV$k~`s;Kq}C%vPM-qiF8w9-~kj-aqRWG%WRmawjXv z;$Lrp<&6BLB*uJs(qJI&kRG|$f`lz5(BiHsG;f{Ft&$PkDzm{e?J#L5GrB$lM%>OX z-u1^*OA?)B>*3Js&>`Hj$AZ)RlmCh0U-=xkv#7h;Y;P_*e<%@&&*;7zLrB~h z$J*aPbi6%RjJbHf(5@I)-d5nP)HtGp=uc+T?Fu+Hc2wy)@XE6JXyLkryjX0Fw)nO( z3tC=nEsG2xWOWXbj**UgbjlIeN?5|E5Bc{KVT?3ujfWQ52IUuuX<7&@)XRs9=A2W$ zfUE!2W5rio;e%BbC+*5eH^^#WCz-e&wL4b9v0J-^E`}cW%LOI_VGRFF&l6BRu@2#l z74jbJNSm`YetE#MRIsz1z(_+1JY*l58lXm-#_dw4kfvck+65>lfYlpTN;CbsB4L2K z*`XzS5qU?cd=Me{R2KYkvE&0ATr>fXO#UflG`J5zrY#kr#q=v#}3sDpMj5PejWQxS%9!dxbJ`Deu) zruVWMGxg?^*0K}6TJyfmGbX$y`AG8h9jUepnHWR=u36|8uRY*Axr+J5a ztzyurU=7YX<*Vk06a#l3?@X9Ly80C_Ju+XsPG~(&dQjvCA~zyGG(sN!>pWZJ(u@U+ zbHg4xx-dFEP~HR7;|gg$pgK{OKBxVC35)%rA2I4viRzGvV^iKhIn!+N%Qi^fxTely z-v3~fjWP1V3iS>OEl%2-7j|PvS`dWaAuX+TS=0yb?yXN89mUA6FoBKLDf>mvDLe~^ zIiY@Ff)D*C<#56Qg+3RWXc`L+P!ro6irV;o> zc`PT665K9w9(KCpcwWzWEEl;f>06=MgieGN%|-y_XN2F*H2=Xu^I*lZw?I4%& zt3p2(5@sy`;#OEtq)VQtEhC-~nUm(nkw&uUr-5&}usJUW)?|TGMr&d8*sYAb8qe1q zL%RJR6Js*1UNm`lOVaCGaM$m>q?Ju&>LFS13+0dCyE_@=N%6?40)g@gSfwA#Xug~& zbC88Udi`gkVn3=6(D~7r*+SYtBM_P13mh{1I>7ihUV&p z(6KnjFOBtF20W z%|u4_v5_e(sGCE6{5bqEqxEwPODLn63IBb1!uAjGhndQgnhcCMT;v0+weC#H{z^LomfV7Ggr8c*l;G zT8nIBxnt-vT$eQqT3Ecnv(=Gs%hCd`*=w+iCX`o_#vVvY&_WmH@YUE-V9r8KBSQ8qzuNP!!9b@d`#qF(Xr5uR_xC<`-$ZT z+NfKWtiV@WQ(2F&@5<2ohoF<|FmNcYW+(Elv$rN4X`lGl=%!0+S{gdy=XUjA!9)pz z8}9(=MLVGLmoG*Pm-U-RLW|DMxG<$IHmRr&r2#j&*PMOyT!uc5c#$BbP0eF-0dSR7w7dnSkxTw$sx5wv6^$vz&(5pt+A?G5bRULsrq(|3YKMBUOoH?Bv z_|2E$cGd|9(yoHnS+?rXS!UAF>E6(G)e{_WG#33Yj={A$>#(thGlbo`2ZK_x!0Vxm zwG;HQ_)uTgh}QS>K3Rk>oOZD`EwA$(!G#LG$K;(5P1+ zH1yx8rMbkd_EXgHzF9c(dJ#NvJIexdqQIi@T^|3+K(=d`%`Z$Jf)U0=EFrWlzhXse zxY?KEuDpjhxWN|egJWf5`R+AVsQxWuGuQ;9$XFwU<)Og 
zC9NBX!}O2AzP90<>KuMPA13o$D{=jN-kPC6H3gQg58&iS+OK-wf4rb967-v!;)EeK zqQ>4NubPk_^J>Z!)FvZbv>F`C9X#XNq%C^$9qEk-(xV5{|G znRv~y+TYl{!Ao#l=oP%$8$bMTGsb4dh~CF}?`J6& zN-M#5{YLgh%7w7W=TIDT8Vi68CRIS!i2FV-!G*u?u))Fmz_HI%oM(6nZ#xWui_M(m zrGCxebGxn3JEs>*l2+jMfS>Hc-Z6? z&orF{YhU=&J|r9Qj71a>E=bY)_G-r#m!s2;7oZsT;3401;fzHCwN^zUj-h)7uewoI zZlvJrep{(-_SJmOz0$j^DqsLU(R+{7m(tkfu{gfn3nYAGgejbUcW#T_X#V|C^gh)* z9uJM<@7`PTp5=cC8)W`D=&qErY!Q^@orXhnAHxJAFWBYT4_D=0!0`FUvEnqX*&35h z+9E#B}zANZFvS*blfj$3@QuU z(f)7*u5y~lFQ)Bc)E_ig^sLwPd(1z^gx0Wl2&<<~VZA;PH}`qMI&?3SL|q#yhjG-G zx^jbL2TrvPOFsUTD!sbk%aUvyP&Scj=^@a((>)e(kZnJZ=dNigXS(WOTX##Awkn49 z3HN3+-+08(1PGV;zpds-{eZ49r|`{~T;|coi`J)@r#vHCO8z^NLb@x~!-`_LS>h2?+e@)^;a_?oUwg5`|?(<3k)p9S?4$Ox9L|1uNnLU`e?-$q4R~p~P4e_^d zXT)3>-)=T1oX0_(lO^Al0qpk6HR#djHcWIZMa#g$T#VTZ>*3fpbSxJ&-_GBdJ?Qk9 zi@(!Y0nH%<7`DS(I+t-cHlcfJ!{Nu@MBX_tnNcs%Iu{cJ_W{)q^!M#gwKg03oxBgD zzJxJZ?#yT|*~N(K{8*YjOxrOO_8Ira^$TwhHvUo;45(5noXJt+<~GFK`?!oY`nVc{Z#W9>$-NwJ&e(>;uwXpO+1rxO&Qg%l2TuAFnee`7BV*^lN2-Pm@ zH7i-vE2>)&ik`oOoy}^_my7v!$~eWv`9B;tjitEFLh3tc6f=x(4_F1Vd<+|mHpl12 z@k~440{mm}`5(DwNEUO83}EfQ*kG;D^?Xsg@jyC(lZL=2Q6 zelaU3GiAf4u14Y)UaN2z+C*F^vZyo>-CVvM4e(^4Tnu= zKkAoLfqpP_-7To|pE+Bq8^DN<*!#3V(o&}xgj~cQOHMPB+gspQ0f&kuxp=}?mx+4( z+ifLN+WKRXQ74pMq@&PyG!CTG>Zrz_=-I4{YiKoaBOdGC0A_7n%(|4l=8F?Q3vT7s zMYp-Zn=dG^_xQNqq+7Bzmj-B{Roe%AvF{;BAM}%(Wlp8KyDj>eo4z<-!|$0V<3VVS z2Ln3FLw4K;>OHxsW;=RR8sg=3NBF=YnNa4rOG>rB#Vi_+W7Ln%;<|}9I2=ENzOd$9|)p5-79-*<)e z8yf-UuWsWjOCx!fPddNQJqYfYXlw9#_0)1~d?y*NS_EPC=_Bm#)P|%%Zo+Oqe=hiT zd;CC52;IvT6^28}!d*yO28rKTlfpi#q2d6zDg@pAPGQ30JNPZio%@dPVAOwvT{9tS z>L~CyU4;XObb}a&rReB=49n=dO43t6d?Yj*7n+W=D*yWNF!R|i@oB7y>GC841kPur+OmAu8UTi)!fdU zP^vDPBlR_|y5B%(2>#o+p8P<2NsK98JKd67bhL%0M>|WZ&KT@Bwm$auuqJ$MtG3ZEaJtvkwCg!r8$PrLF0Hyb!~WMxfw4^zsim^>xbR_&SUz%I4G$O~nqfH;+Op&RLB zS8P43jGZ?b2peoAT5m!hYAp(+{nWzP#zGw=ja`G!DQ?gCa_>zzVci1qQ}yJL4XuIn zI%&?8>_nLrqj4wPT!~Z{yl=`rBn~3nxJdiTjYKQIYkZ&aIa-?pA+uc%eA8$Nir#W~ zl}|h!#@-z%An&^yNLRqD>$EoYJ`W)77CKimGC3IZJ{U8?L%f$32?F!Rc$g`^&g-D@ z9c#5;VLEE6s$lrkYDU_S|DH|z;pENb%Tkj#=^{p4h5HXZ#GHO<)c-oHbA5fa!yPkh z=f4H+NA%@%927OTobKQ7f4EngYwHFdiuFUKp2pQx193+Vtvg+23Y`P3k*-1N{^q_SxRCgpJnJ^%D$>Sn9kr3e zk_3;rkd~DKrZ0sr$9#Z1Cla5)gmqQy>_U4WJfb->B>$H#_=k%=ChsN@mS+<$@^iM0 zRBzW)jQks}3u(dc*cu2A!t`eB=a#NM#QebpYAB9RdwX#)ggHCbm2?n zmB!U#Ywce18Yf16z0-W*S&%#^+BI}Wp%qTeoR8F_GHEj4PEIvFV3zrW zk^W`Ym3v@x%lbe(EO;LOSm$bmuQA$Xi3WZ?7*lB5O+eJD6p1tx{aXmu?>Y|72Rw$RzGc|Mq6`|0-Y)8y zdNYxITHJ;UUq(KJr&uImnWq)d{7Iw_uvSHLmBtsz+XL|s^;tY$Hn~V>ON`8Q5*|(^ zj^@vj2H=UZU2I#w-k2Y>1dW0Vk*45oS0s(A3a>GycbGr|Ox z;PhF+;D7NBkjH2Jet%$;sc_OA3TYWOr0sdU6TcfiG_#V0E++i|q(QmOi&OY`=1rlm zfwT@5^)VnH-w=nbaub<=s9pBhC`1w3l`;lS`A9Z-6h_x1yt=?_TyT<LaM$q-cBG-1(=#~-6u;|9BsKw00 ztFZTn5u-lV{5f=hbayS4cvhiIOQ9SO=dCG%iswF%=xRdw#bult+(h^Vw)?2rQ-YZvIc-3uNio)8{H=x#hcIgJZHChaSQ0%?Mnf5E9Z z!g2;5WqX@bhA8G7#rcqDryQ_@s6}j0?Z*r9auwo0E%BptbWIv%J~1`i-+I_2XVTL^ zx?D@z1X;2fq+1?GsGbN@%cotGlmV(D ziz6SQlBNeSE=?*T=vrfOcE9oTbDy1^j!PPAt--S0@5 zD01t$q#eJB{Et(u)MN?Xu8wqV8PGGhn@kxi7(}*L=^ATlk~|RQX=@qfxSZw{dj9aJ z!M3vRe?>owOiy?V;Rmo|<3gcTp>)J%MQ9U&UH&6fsQ)j5eE%wRpZr3hyilPU#|^d$ zAC%gQy!dx#;cL1@)))Gbefk{=w%2D1%!U2A_WV-uZrs<_gq~$HWh*nn2^U^TBA=qn z2PtQji65YdzKf*%gHb*L!Z!$QATovr+5!CchW)_$ePN_SD2qKsIwKca2IsIxrb9sQ zCrO5_Ncr$JSif)tQvXmk?8_*BK))Yt z`J^|!Rr@txIOTjIKS00oF`$#Zlap5A&`CvXKZ6Yl7OW-tL@_$BdNWENxH`F7nZg1XiQ@`Zh4vEoU6?A9z56H~vU-;>9<{QNz<-)x4|q=g^89cjbO zR?%9AL*8Sd_XZ_@!(i;`;ebldKx}a_5v@+%Wx7iqD9wlaW9W%|ey8_wsAX~i?Eke^ zN`EC`@~s>^c{>9;b~()Fthj;mM_y!>LBsLF&$pN{+zhogXTiP}i zPAhg$=k+-ct=+468*i1pT+vmQ9G}9*VoLaY_)uCKezzV1@2s{% 
zj&=%O@jw`<(;s`3n8MsmjpVrcjn#LHecAZDW31yw9q9aeCwh2H;BS9NU|iN;?T^GN z-o!eK4c~Hrw^*2`OyAlRm9QPGG_8(0vQ{wY$Lztnw~peY$PIMYaVUIuD&bpt&4TEa z-61h`Ib<#ztCW2mi*!x2&x@WK+{qsMz8-{rPG3on4T6B|aPZjuANbtqhCc?5!|bzm zV1H;Ux|rU9HndmQ(h29W@u)oJwR|!Z>K}5>%4iEO%yv?iv!dHNq-!U*vb&h!yG4@k zjySwG#fk5@902+*TfzH`1Gkxbj&ELY8u}YIKpH>!$EQHt8TcN2`vx$d04q5u_m|Wz zeh`mr)J(ly*cfbWj$vQd$MBK)$UQbFY<>4G3_h6hga2yD@F9}7xY!(iUvS{!LUAV7yF1nGxs-~u+ zO`|ug;MsI6TBENzKBIfsUwo4~yLJ|HgTESX=8r0G6{pL16bH>W&^^mPj?Nt1pOjf7NZv_2MD*yxb zL*oNJxbvPK&s?-ax;S|N9I8J69#9^>GAUftD5jNhFiDyO3#R7t^qq5Y`2szR7&{*v ztY0#!4Q#S>2slQ{c<=OJo;AKcpJKkANnu$~Rj;A=eEizPRHdGxbwtKvN!OzwYJbZ7 z7=GB=oq2dXgpr5d;m4>CIO3x}tXS&?tH*Mny2YEc{!GKNLr~wl9*D7VbBN=+PCC>6 zVa?$Dy;)d1T7%Qe7t(pTLHx^n5Mwv`K^E_o(GP<1Z{XWs!$8{nop}{H;`b-6NOcA? z4^L)~BwxCQV4Txx4g8uliP1bm!$-a~@1OK7i*%m6-b#aXvQ-f_Z?{H`P0A_uwj4k}{ ziZb>Wt#uxf%AS1~sSTLs%%0LSiLn9>73zGaOo}nV&(#NvF>rS@%5J)|M|fDp0)()3m7u54TifPM1e{E2g1Pp zxh|AE*uk3b9nIfTR@Gur2u`ucff>832)j%KrXg{em`~m@xHlXLD%bR%TnWT+{8Yrm z8oU=AH`t&qZY!&cS;s1P&lDY5;CIYB6XLa2K$t8Y32r6Rc`{KylMT2;c*JRZ81W1) zFkS*yC&zKZ2+WzeMWJ5ikM(`vd>4?ZA8Ou1n1ccJrt@!WWK>0Pa}YQKcG2 zsv~HZuoP$KD646& z)#H>2mYX#wN;^{OBEY$70k1aO|QYW(zH;_ z_us<3Prre>_Oyq4a4SYy09V&eL&A)#BSBLzjh z1V+F!3Jb5|non6*(pf-f4}g^00l`IPeq@X_;Fh58cp zI@A4K`?EmQpw91qAbNV>C6xz|aXt(=`<+uw&>Mnh+07Yi zv0{sc*ImC8FAUEmUJWGe)kWPkX)o++Y^=7@^df$FxPAA5mU>$(cnFjL!;KF-{TllHVB96rE>cKp_^ArRK_w`p1k4)`mu z0tp*H?`{tscC&~PMzHJUNif911Xir0XR3FfqkAq)*xYV8INQt_r`erH;!r3F2^9JT zChbWDt2*y6_-+hP4QtnLy2#f0omWWv@)YN>Kz&5>u^Lh;`rz2K_PpQzUh2sX`tpcG z2jbgEPFTxHZ!pr#NPUDt7Yn{B-CqE+F%_K8g}~$qQJi!V>0CEBxuF@=tVWs_cL!{8 zMk8r!M!dj?``Cz&I&!OsA^3OGK1|W<0O}7(_L|cx_HP5yW6ntW4mH;=gR#Z{$GlO{ zx$#3Z`FxUlq+}x1tJ3bhH76a(h>NrZ5`Ev`e;5Q$=WdV1^BbeFAgaDfJpj{+4|tNYqbU=ou~OIIE#B%`YiR1C!XtW^&dR7WlBSZ}SyMSjX$21)Xs_-sr%@cAI1s~= zav14n*eG|T`Fe)LQz&#;rOs&OBP0@Eo>45c*0|uz1TOwIx?4Dt|9PTbr$LN(nA3b? 
z+_bqu%gdz6(fRB^CcFpnHeo&OeR16q>2;Fu5#$wMMB-uokv+hSz0dilA<6LSVJHYL zYW;Zzyru7hh)ZFxPfO6Y{m04%cS6z?vT4I=Mm)x2`n44t&xwMnJiRH5V93*b2$(?crtXL=--Za0mW|JmcO|i(4#aOCb+iSUE@^*YTX=g^v)uU9*JpPt9-OuUIyPtbhWInbr`o8n!P@Lk?n3~FJg zy4~>?`jp1?HA_8qf|I6_`o6vlr1g<>zVMdZVDCQB=gj2OXZXdJx{%J~-Dv%bLDQ#})0mtBv|y!0Or|#7Dc4 zFo28sZv3|^{BBbZy{C);iy1~j^MYS^U8>c~?BnxG>`?Z5w3^fpGcO*2d2i;?d3@#r zn~fsvU@TW3IgUH5o8V`kshs*sa2<>oNb9yM#n|V59<+O(OwUgENmP%Dy;m>6C9>dW z(&Q-oHE9s~tT0JnCviGN`+ZV|uCax0H-2-{Y+Tg1=+`4%jAi<-a`ALCId+sCzPt1f zhKId`|F%VO;mHNJo|5XQqyZGFB~fquu|Xn;pY;B*HML4w89&^~#izx_(5;j>@PbJUHyz2!()ft2zpK7b5owON0IlKxvsU_}F<%8=K z@`5P*AdS1y?^iv+l}u=Qx;B2H`iRIOgtwx)_r}$=o+}$;Y9aYo?OMM+O3yDPAaqN{ zPbRz*Wnnw&UR?` zI1wzS4+)l;r6LNKq{cAPovdPu7KQdE&4MO_HZ#IXTpVbkl7=I1 zKAtb8J#R@%(mH|*YH(S2fNwX}lJ?Z$lYK^D<(NZIem+!u7S7!i1ONKmBwb~Plx@}c zKaojL4h2Ci(tyr`wK;YZm!62jp|&H?+xUDKVf%t}##~{9#q>k=J_jd&)BVjTBRtz;c zAn9$ae{2u-95j!{q%)50I#h5qdCf*l=xx&8T;v8KtLbkX#q%ysM8Z7Y@X>4Gp9HT$ zSbNfblpSmfJq^?cHJaFB`eo=aL!!A3hdL+IMGa7%^MEwceI%_-x!@w8zC;UcGA}=0 z2anucj?U){K)k+#Vv6Kj6_MLf9-vU}LKylTB8~+>&=N*IJyT?lGSw^Cg&FXH&0|Ep z$Sc~N5Os$)`maz;=bFk?2dr~!Kit1+FKML-NO~5(&bJZxAqjl?aHp@xCK=T*jCmtL zg_Dg+Sty@5HjF$#6M%5Lh<)K`#&*Ui36+1m@%5hJu$V z)8KK_4{}ipo(=L9F?Rx^MK(m*0Z3=T3#?6AG#jhxZBz8OJ>lQ%Jhg&1$gfbo;3_gD z;@BD5k}eB4)iC9AmMZZm&oOo40uSm;^sK3O(*KZN^;cw=vd~VH=MnE#QwCK>rp$|x zf8m0!C@*1$v>7$HL|m&Zc3259JAMdlTaz78PDFkGlbPo1W3#ahY}k=6aEhhs7GRpA z8Rd1gLD^hN{n@g{_d7oG6d464-6}MwLKz;UZMLKLxglv8g*30`-ww(W_MWERwjiup zSEB&~rmhs3l+e-W|EDDrdcMm(dX{?OK*3!yWdryuk+zCUG?vpGqe*uh6kd+GZJ5VM zr?RulhKVdyHd<&de6-N$q+P>>rU1enh4Kd|)O*dIB?3@32=hnw7a1buZV!lC6h60` z3;fvC3E%#%UsI!FM>az8+A?Vbnesu5ru&A6_)Y_}RShXm_^mu}Xh%89WSVbUKQ*Sc zT<64WMwt#5-imTPBzywm2(~;op0dp-!W|1BT>wYAY~hPLX2Z0#ah&qi8qOvCPkm{o z3eGGEq5Bs<^yV~PK)EeaE+C04o;)DSFKNXIm$-Y>F}5X~Lv~}JewhtDZeK^&LS3YK z*OEU4%A%1pDNx_znWJN{7wP#{clTiX;Un;=e=zUxVj-HHTPDd}yp`Z(VK{GBCZB5G z9PalU4#xw!;pD>QxbIRDyY5sUE**aWGbS#^C)RFEj%kB>230VNo1^a=Pgu0B2(rf~ zakfKYPZqocPqTM;&AFNURQZD!)*9f}T}@Q(I#OM&>yOGOj{kLRg};z zuSFrwF=&ZFE8=*%Pghk99R|;4U&K>+7HT3J#ah~5hYj5_(aU5EOl|y`ANojZIwXI@ z>#d(dMPWFbakdlmSDCErZ6+tOTkLc51Mrxh!?e&Hh8He3$G9J>SY0=TU+H^?(>U-W zo1fqvT072V?LwSYH;d&R8_vx2?{mZ9zEYrri5hX^E&lq}5}f85$(j=(F#Wt1@*9}o ztDiZr`}#ffGuj7#9^8VZQ@-NxH4mWb{&=)ZoX;mdJI%kI%;WS`5k`03#y7?f1-^ErvbM%ffV~Hg;wyKXb*KqHf79M=s!@kyj$wx1W(M||l3BC5L zz&`Elq4&ex{D{;WtA9A)k*W^LhmGd&rtFgxSrH1G(r&SBtD4JM6AtpBvCUa%ooHtA z=qh~H`odKEL|W@D9lGSLhm4lX*lM-_V>8ym&*@=Bv1`O^^e1_bKGHR($j;m+ywD#@EO=KJ`0<<_rUoHw(7kB zJ}|lPD>t06ja~WdC-npJx0yqAn5bz^ z<8ijp0*29$@!^97s7Wg0CFaSHAWudyh6gBf_*Y8%)|CXZQRmL#{$J~n zf9L`eJhq^H!oBQ%OHQ!&Qax zUJO&;Hi7ybrZLyZx@a`m6C90a;7~W(TYA%I{1=+Qs5YQ|rU`6z`6RiNXR*=a=i#j@ zv3T@NGH};o80+N*YkQSQ?j!qR=cpDCJozc}9ZvhBSeEir=Jfp!)kXEex{&wh0}@WK zaUcG$mlH!7jgc~Vur=8FTEX+<=dr_!ckug@Ap-e!`kn8eTtGeH4JVq8K&oBbR)EmI z^+q^XTnM+DbdcMeeZmMIfaV0K|B!l_6;(CBUT2bd|HXE6-43v>=VhGbJ&u(gMTl^8 zQq;FQBpMf%adQ%6nbd&~&a~&azcUPWS^*YkvY}w$J3e#qR>|wX-IC9c9S~BFa(G%_ zNVi36X5E|i+G=5_+^rr-dzx&)-Yf5jbEn!3=b2sgphbDHv|wjEtqpY&M?R>h(paK0 zMqmAV&rNk7najH^AF0fk_(3V`?SWR`^)aO0K`@|up+nS7%;4lnWt=C6K6f`^I(g95 z@5X|y!6+=zS-~glNZ}Tqi`c#Ky6T(0_`i;Rv1iYk{WoZot;rTYB#mszHptTVIkyRsH8Pe&n(VUw?1bOveE<6Ps=oL8+~+yx zbKalxz4t!!{mLCwqSSbvI%NgeaG6ck1bW6%N%>T?{EU(@1buab?j^{3pl?lS`P`QD z)5cKiv3ZS!+tA%ZBH; zB;;x#MI{Z=mEB|X#4|&6-jY^$Zha2jue_AL)*q<9PMOSC2X7~glhbP!P|kn~H0a+N z3ZJ3N6#0@{)4ANJONPW1^$YPZ7jEmp?Y7L;5}S^A-pixaZ`7`hWuVv3lz;7LBhTE) z&@zP>Q&R?qlk<&MX6n{$bkX0@oHxft6Mc3WeY@?o6(4s@U8@wuUHpFKb(>R;phj^+ zhrPCf?0xjxe@mI$--Ysc|8CUhNJSnp17ACZ&8A-c9@{RBxnta|=E2kYb+=BxX{DNm zXA{{eL1jXD=M>gBtJ^7LD^Z}gn3cHNp@&txf%u41 
zB@g~Bb)2CK+`+Mq?sA9B@bW~LLpxFTj2wnWL+^Lc{@LYQTs~Ermic+9D-Lf}=B_rW zX@yJ4vDR+o8(q>`+-sX!?z@p+)=KAhgUe%`ov6a}=^Pe2pFQr?QOtT-de)GoFTGJ<-Qofmft< za{iUmg!#~@YcekO4vuBfm%aY(l5?ozhCMIA80>wgH+$4c&_QuOYp`<>e_u2fAa(4Rip^uMs?VLK~Hz`%ivG$+w(cQd!5ECm6 zpXKqwKnlD-SPu*NZ%b-hq%T)ZrrO7667l_`Hmb&iir|mBOFe1R!pf8x1#3qK3Gp`qH`D>^J3vfqE$8;#-SS z_;q!6OK*x{#ENQlvlipnjQUHT3UHmtOJ^%-(1!Gj!;{|rk-Vk5W!q^FX!r%{{m2gO{s6K{JgROke$9iNhT)P~WrMJJkucf|b z3~PrTvWLtIM>-!=W1ByuIdL_q^jIsCg@xey;CIQcEyCeZO)!8q}mEH(qm8@>e%2 z?ho#6Nb5Hx@!a3q2|w!UpBq8*owckpaxC>r^nQ4ZF8tfPpH=r8(O}*lbRlvtuR7^# z*E*m4G`%A9D@iQ{e{jpj*he-|QHQ7RseP#+;S7!3Q^L???-dzfFQ&T}7#eM&<`WoJ zfz^d|d0p>XGK=q(+>+Yv&Uk?U#%UeqgLQC++*rAlV-OtW`s0~J8(hB4LuwA;% zWIvLc3Z{oI8n3&08@8gnLDck`GuORbh8Mn>$_@*i*`JST*O67N+ZVPnbdR8u=8T@Z zsndg=3i-kihKuI?0g#0$R0)eP0EDLidGHSy{ox?o2B?Lz4nT1ss5__ft} zT&EK71*^H_;j^k%?rwUN9M5mNeul0_Qb@sH+VxO#z8Llix{CXz4phgw7U)WzE%aU7 zOOmoL4BS)P{#mHq>TCTx8)7{O+(8p(|46kObro-Bpf2lj!Fx0qTRf27^JpcX?pv1N z8-(@v$c{7;KM-G7PY=l)$TK$`BQPvGUaHE)E_*0&AlLe1EqVKn2pU0M^Wvi z(R?tYtmXCr*Ll6p#OKH1`>{kyZ8N&X=n^i%x*F$$Jncjn2f?Q))OQ8HB{8c9M7$UN zqV|XX%ZSm#m9A`1f#00N9wV++(T|Pnp0nw(Q1OpiaszCsx_RxEo6-Vjq=`Fg+K$?YzlDV#Nn!zV>@;X6ONKjcqdQ$A69-?^&6x>Uk#HG#V| z;+pX{g>xfleJWy27v<$E)FQK0ql4gg7k-smNYl6G65>evMo04C#tlr&gGzqc8u}1t zgH|iBJNa{Kc6yCHeA_)!&|UEJ5L;Zr25NfjK8a@q)7WmQT6k^E@K}au{Fcfs>1hj3 zjhql3p|gn#xsCH^9>1nMYQb>Ti<5ZptrhUBJ(PH|dH)vYQjWbixX*per8gfgzX<%D zqXNcO;m0fTbhW=?2{A>`TJ>toX$>!};b)b6PGYq_gt#a1TNfgmh#%1HdxTKR&@Ngu z3SL;f>vYeaGdvfeF4=Z$*{v7%SbqfkMxr0160fMUFOy_^!jN1UA)$VL6xois3h<}YYiW7Rvl4};XP5Ku4&XL)Wl>R;(Lk1vObb%jQELaUjw*# zu_PXZ?}eeUx`*cyc$$ep-ocd_ajV}rj^asPv4sAC0T$M%A?BR8jk@BLeBp(l*zn)E z-21h5#z!r%jeT^IHfQc&@R+cQ3cFJ`ZFL17^X$up_MUjd8T_J1zR`Svp@yU&AH4gdu$r;ic>vS$yc%05kD)fGHkY@?L6c z%E>i|`)f3fV{Gwp3n=&X_XK_63zsi4j>Xr5Z`d+in_@mDN$&^xxPbe`x3$SSx3zR& zT!K5H$HczZ*K6UE+wRH`zq~P^3HBD9uR9zpXDxZ&O@Z&Z^Sx8-zKS_Ns-XoPSBMLS zr?je+>_^~hsb#dVZwp>Ay8%IWMaON>NgLR}xEYh9BW5IuXW;Ws@x1B3;TORS@r7F% zGv^C};G+=Hy>08XYguv=+k~B{WtKv&HY`Zg&q^X@W^4TftH*@4b)gq`E2R==`3R#&zob29@_f> zcfQ-Jo7~vOcg|P0pr;D|tIJhhSVg9qWWY4OVe+@SkSy}Nk|wA+b$run01 z_6O~Jeu%~M!lPL2#zfe_xx$Oq>vN?{1(0>iVpyz576*5e06UJhMt=KwVkx|1CjFz+6GUs zM|xk<$I{8YoWNLZmBc>QSN~Hzklt3oUMZIYRonhw)PbrQ8uNl)$0C}0dl{I^fgkSL zp=h;_1%F7Q-8XMM6)onpW_3*X6CT|?iLO+sNl%xiQnyntsGw%11r8O=BK^$KKg(4DsWy zM_Kxh2enP2l7e=csE_FP<)g25+ji<*XENK)50YNqX6(C{+-kVg z3H`;a#PJW;F?vhnrbYd^i>XALIed5^X2p0rxr6D?W{#?+V}IR_0r$9hggA1Bbxd*K5H Q5pF literal 0 HcmV?d00001 diff --git a/langchain/vectorstores/faiss.py b/langchain/vectorstores/faiss.py index 42f9cf1bcd016..dc83326cf457c 100644 --- a/langchain/vectorstores/faiss.py +++ b/langchain/vectorstores/faiss.py @@ -180,13 +180,20 @@ def add_embeddings( return self.__add(texts, embeddings, metadatas=metadatas, ids=ids, **kwargs) def similarity_search_with_score_by_vector( - self, embedding: List[float], k: int = 4 + self, + embedding: List[float], + k: int = 4, + filter: Optional[Dict[str, Any]] = None, + fetch_k: int = 20, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: embedding: Embedding vector to look up documents similar to. k: Number of Documents to return. Defaults to 4. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. + fetch_k: (Optional[int]) Number of Documents to fetch before filtering. + Defaults to 20. 
Returns: List of documents most similar to the query text and L2 distance @@ -196,7 +203,7 @@ def similarity_search_with_score_by_vector( vector = np.array([embedding], dtype=np.float32) if self._normalize_L2: faiss.normalize_L2(vector) - scores, indices = self.index.search(vector, k) + scores, indices = self.index.search(vector, k if filter is None else fetch_k) docs = [] for j, i in enumerate(indices[0]): if i == -1: @@ -206,54 +213,96 @@ def similarity_search_with_score_by_vector( doc = self.docstore.search(_id) if not isinstance(doc, Document): raise ValueError(f"Could not find document for id {_id}, got {doc}") - docs.append((doc, scores[0][j])) - return docs + if filter is not None: + if all(doc.metadata.get(key) == value for key, value in filter.items()): + docs.append((doc, scores[0][j])) + else: + docs.append((doc, scores[0][j])) + return docs[:k] def similarity_search_with_score( - self, query: str, k: int = 4 + self, + query: str, + k: int = 4, + filter: Optional[Dict[str, Any]] = None, + fetch_k: int = 20, + **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. + fetch_k: (Optional[int]) Number of Documents to fetch before filtering. + Defaults to 20. Returns: List of documents most similar to the query text with L2 distance in float. Lower score represents more similarity. """ embedding = self.embedding_function(query) - docs = self.similarity_search_with_score_by_vector(embedding, k) + docs = self.similarity_search_with_score_by_vector( + embedding, + k, + filter=filter, + fetch_k=fetch_k, + **kwargs, + ) return docs def similarity_search_by_vector( - self, embedding: List[float], k: int = 4, **kwargs: Any + self, + embedding: List[float], + k: int = 4, + filter: Optional[Dict[str, Any]] = None, + fetch_k: int = 20, + **kwargs: Any, ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. + fetch_k: (Optional[int]) Number of Documents to fetch before filtering. + Defaults to 20. Returns: List of Documents most similar to the embedding. """ - docs_and_scores = self.similarity_search_with_score_by_vector(embedding, k) + docs_and_scores = self.similarity_search_with_score_by_vector( + embedding, + k, + filter=filter, + fetch_k=fetch_k, + **kwargs, + ) return [doc for doc, _ in docs_and_scores] def similarity_search( - self, query: str, k: int = 4, **kwargs: Any + self, + query: str, + k: int = 4, + filter: Optional[Dict[str, Any]] = None, + fetch_k: int = 20, + **kwargs: Any, ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. + filter: (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. + fetch_k: (Optional[int]) Number of Documents to fetch before filtering. + Defaults to 20. Returns: List of Documents most similar to the query. 
""" - docs_and_scores = self.similarity_search_with_score(query, k) + docs_and_scores = self.similarity_search_with_score( + query, k, filter=filter, fetch_k=fetch_k, **kwargs + ) return [doc for doc, _ in docs_and_scores] def max_marginal_relevance_search_by_vector( @@ -262,6 +311,7 @@ def max_marginal_relevance_search_by_vector( k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, + filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. @@ -272,7 +322,8 @@ def max_marginal_relevance_search_by_vector( Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. - fetch_k: Number of Documents to fetch to pass to MMR algorithm. + fetch_k: Number of Documents to fetch before filtering to + pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. @@ -280,7 +331,23 @@ def max_marginal_relevance_search_by_vector( Returns: List of Documents selected by maximal marginal relevance. """ - _, indices = self.index.search(np.array([embedding], dtype=np.float32), fetch_k) + _, indices = self.index.search( + np.array([embedding], dtype=np.float32), + fetch_k if filter is None else fetch_k * 2, + ) + if filter is not None: + filtered_indices = [] + for i in indices[0]: + if i == -1: + # This happens when not enough docs are returned. + continue + _id = self.index_to_docstore_id[i] + doc = self.docstore.search(_id) + if not isinstance(doc, Document): + raise ValueError(f"Could not find document for id {_id}, got {doc}") + if all(doc.metadata.get(key) == value for key, value in filter.items()): + filtered_indices.append(i) + indices = np.array([filtered_indices]) # -1 happens when not enough docs are returned. embeddings = [self.index.reconstruct(int(i)) for i in indices[0] if i != -1] mmr_selected = maximal_marginal_relevance( @@ -308,6 +375,7 @@ def max_marginal_relevance_search( k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, + filter: Optional[Dict[str, Any]] = None, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. @@ -318,7 +386,8 @@ def max_marginal_relevance_search( Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. - fetch_k: Number of Documents to fetch to pass to MMR algorithm. + fetch_k: Number of Documents to fetch before filtering (if needed) to + pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. 
@@ -328,7 +397,12 @@
         """
         embedding = self.embedding_function(query)
         docs = self.max_marginal_relevance_search_by_vector(
-            embedding, k, fetch_k, lambda_mult=lambda_mult
+            embedding,
+            k,
+            fetch_k,
+            lambda_mult=lambda_mult,
+            filter=filter,
+            **kwargs,
         )
         return docs
 
@@ -522,6 +596,8 @@ def _similarity_search_with_relevance_scores(
         self,
         query: str,
         k: int = 4,
+        filter: Optional[Dict[str, Any]] = None,
+        fetch_k: int = 20,
         **kwargs: Any,
     ) -> List[Tuple[Document, float]]:
         """Return docs and their similarity scores on a scale from 0 to 1."""
@@ -530,5 +606,11 @@
                 "normalize_score_fn must be provided to"
                 " FAISS constructor to normalize scores"
             )
-        docs_and_scores = self.similarity_search_with_score(query, k=k)
+        docs_and_scores = self.similarity_search_with_score(
+            query,
+            k=k,
+            filter=filter,
+            fetch_k=fetch_k,
+            **kwargs,
+        )
         return [(doc, self.relevance_score_fn(score)) for doc, score in docs_and_scores]
diff --git a/tests/integration_tests/vectorstores/test_faiss.py b/tests/integration_tests/vectorstores/test_faiss.py
index 792d087f47d15..dfeea9d12d145 100644
--- a/tests/integration_tests/vectorstores/test_faiss.py
+++ b/tests/integration_tests/vectorstores/test_faiss.py
@@ -74,6 +74,28 @@ def test_faiss_with_metadatas() -> None:
     assert output == [Document(page_content="foo", metadata={"page": 0})]
 
 
+def test_faiss_with_metadatas_and_filter() -> None:
+    texts = ["foo", "bar", "baz"]
+    metadatas = [{"page": i} for i in range(len(texts))]
+    docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
+    expected_docstore = InMemoryDocstore(
+        {
+            docsearch.index_to_docstore_id[0]: Document(
+                page_content="foo", metadata={"page": 0}
+            ),
+            docsearch.index_to_docstore_id[1]: Document(
+                page_content="bar", metadata={"page": 1}
+            ),
+            docsearch.index_to_docstore_id[2]: Document(
+                page_content="baz", metadata={"page": 2}
+            ),
+        }
+    )
+    assert docsearch.docstore.__dict__ == expected_docstore.__dict__
+    output = docsearch.similarity_search("foo", k=1, filter={"page": 1})
+    assert output == []
+
+
 def test_faiss_search_not_found() -> None:
     """Test what happens when document is not found."""
     texts = ["foo", "bar", "baz"]
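
Taken together, the changes above let a FAISS search over-fetch `fetch_k` candidates and then filter them on metadata before returning the top `k`. A minimal usage sketch (the embedding class is only illustrative; any `Embeddings` implementation works):

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
db = FAISS.from_texts(texts, OpenAIEmbeddings(), metadatas=metadatas)

# fetch_k candidates are retrieved first, the metadata filter is applied
# afterwards, and at most k of the surviving documents are returned.
docs = db.similarity_search("foo", k=2, filter={"page": 1}, fetch_k=10)
```

From 232faba796e6fab1159c57e63a3cc999545f2089 Mon Sep 17 00:00:00 2001
From: xu0o0
Date: Mon, 12 Jun 2023 04:23:22 +0800
Subject: [PATCH 27/46] fix: TypeError when loading confluence pages by cql
 (#5878)

The Confluence loader uses the wrong API (`Confluence.cql()` provided by
`atlassian-python-api`) to load pages by CQL. `Confluence.cql()` is a wrapper
of the `/rest/api/search` API, which searches for entities in Confluence.

To search for pages in Confluence, the loader can use the
`/rest/api/content/search` API.

For reference, this is roughly how loading by CQL looks from the user side (a
minimal sketch; the URL and credentials below are placeholders):

```python
from langchain.document_loaders import ConfluenceLoader

loader = ConfluenceLoader(
    url="https://example.atlassian.net/wiki",  # placeholder instance
    username="me@example.com",
    api_key="<api-key>",
)
# load(cql=...) now goes through /rest/api/content/search
# instead of Confluence.cql().
docs = loader.load(cql="type=page AND space=ENG", limit=10, max_pages=50)
```

#### Who can review?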
Tag maintainers/contributors who might be interested: @eyurtsev #### References ##### Cloud API https://developer.atlassian.com/cloud/confluence/rest/v1/api-group-content/#api-wiki-rest-api-content-search-get https://developer.atlassian.com/cloud/confluence/rest/v1/api-group-search/#api-wiki-rest-api-search-get ##### Server API https://docs.atlassian.com/ConfluenceServer/rest/8.3.1/#api/content-search https://docs.atlassian.com/ConfluenceServer/rest/8.3.1/#api/search --- langchain/document_loaders/confluence.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/langchain/document_loaders/confluence.py b/langchain/document_loaders/confluence.py index 05806412cb18d..a90cfb801f867 100644 --- a/langchain/document_loaders/confluence.py +++ b/langchain/document_loaders/confluence.py @@ -1,7 +1,7 @@ """Load Data from a Confluence Space""" import logging from io import BytesIO -from typing import Any, Callable, List, Optional, Union +from typing import Any, Callable, Dict, List, Optional, Union from tenacity import ( before_sleep_log, @@ -253,7 +253,7 @@ def load( if cql: pages = self.paginate_request( - self.confluence.cql, + self._search_content_by_cql, cql=cql, limit=limit, max_pages=max_pages, @@ -292,6 +292,19 @@ def load( return docs + def _search_content_by_cql( + self, cql: str, include_archived_spaces: Optional[bool] = None, **kwargs: Any + ) -> List[dict]: + url = "rest/api/content/search" + + params: Dict[str, Any] = {"cql": cql} + params.update(kwargs) + if include_archived_spaces is not None: + params["includeArchivedSpaces"] = include_archived_spaces + + response = self.confluence.get(url, params=params) + return response.get("results", []) + def paginate_request(self, retrieval_method: Callable, **kwargs: Any) -> List: """Paginate the various methods to retrieve groups of pages. From a7227ee01b55aa44d51474d36272539c02e2bf45 Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Sun, 11 Jun 2023 13:35:14 -0700 Subject: [PATCH 28/46] Harrison/embaas (#6010) Co-authored-by: Julius Lipp <43986145+juliuslipp@users.noreply.github.com> --- .../text_embedding/examples/embaas.ipynb | 159 ++++++++++++++++++ langchain/embeddings/__init__.py | 2 + langchain/embeddings/embaas.py | 140 +++++++++++++++ .../embeddings/test_embaas.py | 58 +++++++ 4 files changed, 359 insertions(+) create mode 100644 docs/modules/models/text_embedding/examples/embaas.ipynb create mode 100644 langchain/embeddings/embaas.py create mode 100644 tests/integration_tests/embeddings/test_embaas.py diff --git a/docs/modules/models/text_embedding/examples/embaas.ipynb b/docs/modules/models/text_embedding/examples/embaas.ipynb new file mode 100644 index 0000000000000..cb5132e8c6751 --- /dev/null +++ b/docs/modules/models/text_embedding/examples/embaas.ipynb @@ -0,0 +1,159 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "source": [ + "[embaas](https://embaas.io) is a fully managed NLP API service that offers features like embedding generation, document text extraction, document to embeddings and more. You can choose a [variety of pre-trained models](https://embaas.io/docs/models/embeddings).\n", + "\n", + "In this tutorial, we will show you how to use the embaas Embeddings API to generate embeddings for a given text.\n", + "\n", + "### Prerequisites\n", + "Create your free embaas account at [https://embaas.io/register](https://embaas.io/register) and generate an [API key](https://embaas.io/dashboard/api-keys)." 
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [
+ "# Set API key\n",
+ "embaas_api_key = \"YOUR_API_KEY\"\n",
+ "# or set environment variable\n",
+ "import os; os.environ[\"EMBAAS_API_KEY\"] = \"YOUR_API_KEY\""
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [
+ "from langchain.embeddings import EmbaasEmbeddings"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [
+ "embeddings = EmbaasEmbeddings()"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [
+ "# Create embeddings for a single document\n",
+ "doc_text = \"This is a test document.\"\n",
+ "doc_text_embedding = embeddings.embed_query(doc_text)"
+ ],
+ "metadata": {
+ "collapsed": false,
+ "ExecuteTime": {
+ "start_time": "2023-06-10T11:17:55.938517Z",
+ "end_time": "2023-06-10T11:17:55.940265Z"
+ }
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [
+ "# Print created embedding\n",
+ "print(doc_text_embedding)"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "outputs": [],
+ "source": [
+ "# Create embeddings for multiple documents\n",
+ "doc_texts = [\"This is a test document.\", \"This is another test document.\"]\n",
+ "doc_texts_embeddings = embeddings.embed_documents(doc_texts)"
+ ],
+ "metadata": {
+ "collapsed": false,
+ "ExecuteTime": {
+ "start_time": "2023-06-10T11:19:25.235320Z",
+ "end_time": "2023-06-10T11:19:25.237161Z"
+ }
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "outputs": [],
+ "source": [
+ "# Print created embeddings\n",
+ "for i, doc_text_embedding in enumerate(doc_texts_embeddings):\n",
+ " print(f\"Embedding for document {i + 1}: {doc_text_embedding}\")"
+ ],
+ "metadata": {
+ "collapsed": false
+ }
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "outputs": [],
+ "source": [
+ "# Using a different model and/or custom instruction\n",
+ "embeddings = EmbaasEmbeddings(model=\"instructor-large\", instruction=\"Represent the Wikipedia document for retrieval\")"
+ ],
+ "metadata": {
+ "collapsed": false,
+ "ExecuteTime": {
+ "start_time": "2023-06-10T11:22:26.138357Z",
+ "end_time": "2023-06-10T11:22:26.139769Z"
+ }
+ }
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "For more detailed information about the embaas Embeddings API, please refer to [the official embaas API documentation](https://embaas.io/api-reference)."
+ ], + "metadata": { + "collapsed": false + } + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/langchain/embeddings/__init__.py b/langchain/embeddings/__init__.py index dafc3e64103c2..a54ea9aa4709c 100644 --- a/langchain/embeddings/__init__.py +++ b/langchain/embeddings/__init__.py @@ -10,6 +10,7 @@ from langchain.embeddings.cohere import CohereEmbeddings from langchain.embeddings.deepinfra import DeepInfraEmbeddings from langchain.embeddings.elasticsearch import ElasticsearchEmbeddings +from langchain.embeddings.embaas import EmbaasEmbeddings from langchain.embeddings.fake import FakeEmbeddings from langchain.embeddings.google_palm import GooglePalmEmbeddings from langchain.embeddings.huggingface import ( @@ -60,6 +61,7 @@ "VertexAIEmbeddings", "BedrockEmbeddings", "DeepInfraEmbeddings", + "EmbaasEmbeddings", ] diff --git a/langchain/embeddings/embaas.py b/langchain/embeddings/embaas.py new file mode 100644 index 0000000000000..8a9134f711712 --- /dev/null +++ b/langchain/embeddings/embaas.py @@ -0,0 +1,140 @@ +"""Wrapper around embaas embeddings API.""" +from typing import Any, Dict, List, Mapping, Optional + +import requests +from pydantic import BaseModel, Extra, root_validator +from typing_extensions import NotRequired, TypedDict + +from langchain.embeddings.base import Embeddings +from langchain.utils import get_from_dict_or_env + +# Currently supported maximum batch size for embedding requests +MAX_BATCH_SIZE = 256 +EMBAAS_API_URL = "https://api.embaas.io/v1/embeddings/" + + +class EmbaasEmbeddingsPayload(TypedDict): + """Payload for the embaas embeddings API.""" + + model: str + texts: List[str] + instruction: NotRequired[str] + + +class EmbaasEmbeddings(BaseModel, Embeddings): + """Wrapper around embaas's embedding service. + + To use, you should have the + environment variable ``EMBAAS_API_KEY`` set with your API key, or pass + it as a named parameter to the constructor. + + Example: + .. 
code-block:: python
+
+ # Initialise with default model and instruction
+ from langchain.embeddings import EmbaasEmbeddings
+ emb = EmbaasEmbeddings()
+
+ # Initialise with custom model and instruction
+ from langchain.embeddings import EmbaasEmbeddings
+ emb_model = "instructor-large"
+ emb_inst = "Represent the Wikipedia document for retrieval"
+ emb = EmbaasEmbeddings(
+ model=emb_model,
+ instruction=emb_inst,
+ embaas_api_key="your-api-key"
+ )
+ """
+
+ model: str = "e5-large-v2"
+ """The model used for embeddings."""
+ instruction: Optional[str] = None
+ """Instruction used for domain-specific embeddings."""
+ api_url: str = EMBAAS_API_URL
+ """The URL for the embaas embeddings API."""
+ embaas_api_key: Optional[str] = None
+
+ class Config:
+ """Configuration for this pydantic object."""
+
+ extra = Extra.forbid
+
+ @root_validator()
+ def validate_environment(cls, values: Dict) -> Dict:
+ """Validate that the API key exists in the environment."""
+ embaas_api_key = get_from_dict_or_env(
+ values, "embaas_api_key", "EMBAAS_API_KEY"
+ )
+ values["embaas_api_key"] = embaas_api_key
+ return values
+
+ @property
+ def _identifying_params(self) -> Mapping[str, Any]:
+ """Get the identifying params."""
+ return {"model": self.model, "instruction": self.instruction}
+
+ def _generate_payload(self, texts: List[str]) -> EmbaasEmbeddingsPayload:
+ """Generates payload for the API request."""
+ payload = EmbaasEmbeddingsPayload(texts=texts, model=self.model)
+ if self.instruction:
+ payload["instruction"] = self.instruction
+ return payload
+
+ def _handle_request(self, payload: EmbaasEmbeddingsPayload) -> List[List[float]]:
+ """Sends a request to the Embaas API and handles the response."""
+ headers = {
+ "Authorization": f"Bearer {self.embaas_api_key}",
+ "Content-Type": "application/json",
+ }
+
+ response = requests.post(self.api_url, headers=headers, json=payload)
+ response.raise_for_status()
+
+ parsed_response = response.json()
+ embeddings = [item["embedding"] for item in parsed_response["data"]]
+
+ return embeddings
+
+ def _generate_embeddings(self, texts: List[str]) -> List[List[float]]:
+ """Generate embeddings using the Embaas API."""
+ payload = self._generate_payload(texts)
+ try:
+ return self._handle_request(payload)
+ except requests.exceptions.RequestException as e:
+ if e.response is None or not e.response.text:
+ raise ValueError(f"Error raised by embaas embeddings API: {e}")
+
+ parsed_response = e.response.json()
+ if "message" in parsed_response:
+ raise ValueError(
+ "Validation Error raised by embaas embeddings API: "
+ f"{parsed_response['message']}"
+ )
+ raise
+
+ def embed_documents(self, texts: List[str]) -> List[List[float]]:
+ """Get embeddings for a list of texts.
+
+ Args:
+ texts: The list of texts to get embeddings for.
+
+ Returns:
+ List of embeddings, one for each text.
+ """
+ batches = [
+ texts[i : i + MAX_BATCH_SIZE] for i in range(0, len(texts), MAX_BATCH_SIZE)
+ ]
+ embeddings = [self._generate_embeddings(batch) for batch in batches]
+ # flatten the list of lists into a single list
+ return [embedding for batch in embeddings for embedding in batch]
+
+ def embed_query(self, text: str) -> List[float]:
+ """Get embeddings for a single text.
+
+ Args:
+ text: The text to get embeddings for.
+
+ Returns:
+ Embedding for the text.
+ """ + return self.embed_documents([text])[0] diff --git a/tests/integration_tests/embeddings/test_embaas.py b/tests/integration_tests/embeddings/test_embaas.py new file mode 100644 index 0000000000000..8a13f4d9965ba --- /dev/null +++ b/tests/integration_tests/embeddings/test_embaas.py @@ -0,0 +1,58 @@ +"""Test embaas embeddings.""" +import responses + +from langchain.embeddings.embaas import EMBAAS_API_URL, EmbaasEmbeddings + + +def test_embaas_embed_documents() -> None: + """Test embaas embeddings with multiple texts.""" + texts = ["foo bar", "bar foo", "foo"] + embedding = EmbaasEmbeddings() + output = embedding.embed_documents(texts) + assert len(output) == 3 + assert len(output[0]) == 1024 + assert len(output[1]) == 1024 + assert len(output[2]) == 1024 + + +def test_embaas_embed_query() -> None: + """Test embaas embeddings with multiple texts.""" + text = "foo" + embeddings = EmbaasEmbeddings() + output = embeddings.embed_query(text) + assert len(output) == 1024 + + +def test_embaas_embed_query_instruction() -> None: + """Test embaas embeddings with a different instruction.""" + text = "Test" + instruction = "query" + embeddings = EmbaasEmbeddings(instruction=instruction) + output = embeddings.embed_query(text) + assert len(output) == 1024 + + +def test_embaas_embed_query_model() -> None: + """Test embaas embeddings with a different model.""" + text = "Test" + model = "instructor-large" + instruction = "Represent the query for retrieval" + embeddings = EmbaasEmbeddings(model=model, instruction=instruction) + output = embeddings.embed_query(text) + assert len(output) == 768 + + +@responses.activate +def test_embaas_embed_documents_response() -> None: + """Test embaas embeddings with multiple texts.""" + responses.add( + responses.POST, + EMBAAS_API_URL, + json={"data": [{"embedding": [0.0] * 1024}]}, + status=200, + ) + + text = "asd" + embeddings = EmbaasEmbeddings() + output = embeddings.embed_query(text) + assert len(output) == 1024 From 614cff89bc99dcb2c995a0ec0c375c9ae8941d66 Mon Sep 17 00:00:00 2001 From: Zander Chase <130414180+vowelparrot@users.noreply.github.com> Date: Sun, 11 Jun 2023 15:45:12 -0700 Subject: [PATCH 29/46] I before E (#6015) --- langchain/retrievers/milvus.py | 12 +++++++++++- langchain/retrievers/zilliz.py | 12 +++++++++++- 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/langchain/retrievers/milvus.py b/langchain/retrievers/milvus.py index 915d61d989763..58f1bf23cf88f 100644 --- a/langchain/retrievers/milvus.py +++ b/langchain/retrievers/milvus.py @@ -1,4 +1,5 @@ """Milvus Retriever""" +import warnings from typing import Any, Dict, List, Optional from langchain.embeddings.base import Embeddings @@ -8,7 +9,7 @@ # TODO: Update to MilvusClient + Hybrid Search when available -class MilvusRetreiver(BaseRetriever): +class MilvusRetriever(BaseRetriever): def __init__( self, embedding_function: Embeddings, @@ -41,3 +42,12 @@ def get_relevant_documents(self, query: str) -> List[Document]: async def aget_relevant_documents(self, query: str) -> List[Document]: raise NotImplementedError + + +def MilvusRetreiver(*args: Any, **kwargs: Any) -> MilvusRetriever: + warnings.warn( + "MilvusRetreiver will be deprecated in the future. 
" + "Please use MilvusRetriever ('i' before 'e') instead.", + DeprecationWarning, + ) + return MilvusRetriever(*args, **kwargs) diff --git a/langchain/retrievers/zilliz.py b/langchain/retrievers/zilliz.py index 6b39a3a022e36..d64a49758e9ff 100644 --- a/langchain/retrievers/zilliz.py +++ b/langchain/retrievers/zilliz.py @@ -1,4 +1,5 @@ """Zilliz Retriever""" +import warnings from typing import Any, Dict, List, Optional from langchain.embeddings.base import Embeddings @@ -8,7 +9,7 @@ # TODO: Update to ZillizClient + Hybrid Search when available -class ZillizRetreiver(BaseRetriever): +class ZillizRetriever(BaseRetriever): def __init__( self, embedding_function: Embeddings, @@ -41,3 +42,12 @@ def get_relevant_documents(self, query: str) -> List[Document]: async def aget_relevant_documents(self, query: str) -> List[Document]: raise NotImplementedError + + +def ZillizRetreiver(*args: Any, **kwargs: Any) -> ZillizRetriever: + warnings.warn( + "ZillizRetreiver will be deprecated in the future. " + "Please use ZillizRetriever ('i' before 'e') instead.", + DeprecationWarning, + ) + return ZillizRetriever(*args, **kwargs) From 18af149e91e62b3ac7728ddea420688d41043734 Mon Sep 17 00:00:00 2001 From: Nuno Campos Date: Sun, 11 Jun 2023 23:51:28 +0100 Subject: [PATCH 30/46] nc/load (#5733) Co-authored-by: Harrison Chase --- langchain/base_language.py | 5 +- langchain/callbacks/manager.py | 2 +- langchain/callbacks/tracers/base.py | 3 - langchain/callbacks/tracers/schemas.py | 5 +- langchain/chains/base.py | 10 +- langchain/chains/llm.py | 9 +- langchain/chat_models/base.py | 7 +- langchain/chat_models/openai.py | 4 + langchain/llms/base.py | 13 +- langchain/llms/openai.py | 8 + langchain/load/__init__.py | 0 langchain/load/dump.py | 22 ++ langchain/load/load.py | 65 +++++ langchain/load/serializable.py | 135 +++++++++ langchain/prompts/base.py | 9 +- langchain/prompts/chat.py | 11 +- langchain/prompts/few_shot.py | 4 + langchain/prompts/prompt.py | 6 + langchain/schema.py | 14 +- poetry.lock | 31 +- pyproject.toml | 9 +- .../callbacks/tracers/test_base_tracer.py | 39 +-- .../callbacks/tracers/test_langchain_v1.py | 39 +-- .../load/__snapshots__/test_dump.ambr | 273 ++++++++++++++++++ tests/unit_tests/load/test_dump.py | 103 +++++++ tests/unit_tests/load/test_load.py | 54 ++++ tests/unit_tests/test_dependencies.py | 1 + 27 files changed, 810 insertions(+), 71 deletions(-) create mode 100644 langchain/load/__init__.py create mode 100644 langchain/load/dump.py create mode 100644 langchain/load/load.py create mode 100644 langchain/load/serializable.py create mode 100644 tests/unit_tests/load/__snapshots__/test_dump.ambr create mode 100644 tests/unit_tests/load/test_dump.py create mode 100644 tests/unit_tests/load/test_load.py diff --git a/langchain/base_language.py b/langchain/base_language.py index 2587e8d245ecc..f02e43d613c51 100644 --- a/langchain/base_language.py +++ b/langchain/base_language.py @@ -4,9 +4,8 @@ from abc import ABC, abstractmethod from typing import Any, List, Optional, Sequence, Set -from pydantic import BaseModel - from langchain.callbacks.manager import Callbacks +from langchain.load.serializable import Serializable from langchain.schema import BaseMessage, LLMResult, PromptValue, get_buffer_string @@ -29,7 +28,7 @@ def _get_token_ids_default_method(text: str) -> List[int]: return tokenizer.encode(text) -class BaseLanguageModel(BaseModel, ABC): +class BaseLanguageModel(Serializable, ABC): @abstractmethod def generate_prompt( self, diff --git a/langchain/callbacks/manager.py 
b/langchain/callbacks/manager.py index 2c935003200a7..07600bf97ad24 100644 --- a/langchain/callbacks/manager.py +++ b/langchain/callbacks/manager.py @@ -204,7 +204,7 @@ def _handle_event( except Exception as e: if handler.raise_error: raise e - logging.warning(f"Error in {event_name} callback: {e}") + logger.warning(f"Error in {event_name} callback: {e}") async def _ahandle_event_for_handler( diff --git a/langchain/callbacks/tracers/base.py b/langchain/callbacks/tracers/base.py index 4c7ddbac92b27..93df351383d1d 100644 --- a/langchain/callbacks/tracers/base.py +++ b/langchain/callbacks/tracers/base.py @@ -93,7 +93,6 @@ def on_llm_start( execution_order = self._get_execution_order(parent_run_id_) llm_run = Run( id=run_id, - name=serialized.get("name"), parent_run_id=parent_run_id, serialized=serialized, inputs={"prompts": prompts}, @@ -154,7 +153,6 @@ def on_chain_start( execution_order = self._get_execution_order(parent_run_id_) chain_run = Run( id=run_id, - name=serialized.get("name"), parent_run_id=parent_run_id, serialized=serialized, inputs=inputs, @@ -216,7 +214,6 @@ def on_tool_start( execution_order = self._get_execution_order(parent_run_id_) tool_run = Run( id=run_id, - name=serialized.get("name"), parent_run_id=parent_run_id, serialized=serialized, inputs={"input": input_str}, diff --git a/langchain/callbacks/tracers/schemas.py b/langchain/callbacks/tracers/schemas.py index 4816b8b90d1d8..bc8abeae06a4c 100644 --- a/langchain/callbacks/tracers/schemas.py +++ b/langchain/callbacks/tracers/schemas.py @@ -124,7 +124,10 @@ class Run(RunBase): def assign_name(cls, values: dict) -> dict: """Assign name to the run.""" if "name" not in values: - values["name"] = values["serialized"]["name"] + if "name" in values["serialized"]: + values["name"] = values["serialized"]["name"] + elif "id" in values["serialized"]: + values["name"] = values["serialized"]["id"][-1] return values diff --git a/langchain/chains/base.py b/langchain/chains/base.py index 2db63a8fef9e7..66354adc8c81b 100644 --- a/langchain/chains/base.py +++ b/langchain/chains/base.py @@ -7,7 +7,7 @@ from typing import Any, Dict, List, Optional, Union import yaml -from pydantic import BaseModel, Field, root_validator, validator +from pydantic import Field, root_validator, validator import langchain from langchain.callbacks.base import BaseCallbackManager @@ -18,6 +18,8 @@ CallbackManagerForChainRun, Callbacks, ) +from langchain.load.dump import dumpd +from langchain.load.serializable import Serializable from langchain.schema import RUN_KEY, BaseMemory, RunInfo @@ -25,7 +27,7 @@ def _get_verbosity() -> bool: return langchain.verbose -class Chain(BaseModel, ABC): +class Chain(Serializable, ABC): """Base interface that all chains should implement.""" memory: Optional[BaseMemory] = None @@ -131,7 +133,7 @@ def __call__( ) new_arg_supported = inspect.signature(self._call).parameters.get("run_manager") run_manager = callback_manager.on_chain_start( - {"name": self.__class__.__name__}, + dumpd(self), inputs, ) try: @@ -179,7 +181,7 @@ async def acall( ) new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager") run_manager = await callback_manager.on_chain_start( - {"name": self.__class__.__name__}, + dumpd(self), inputs, ) try: diff --git a/langchain/chains/llm.py b/langchain/chains/llm.py index 18d8f5392a8f9..4c74353059cf1 100644 --- a/langchain/chains/llm.py +++ b/langchain/chains/llm.py @@ -15,6 +15,7 @@ ) from langchain.chains.base import Chain from langchain.input import get_colored_text +from langchain.load.dump 
import dumpd from langchain.prompts.base import BasePromptTemplate from langchain.prompts.prompt import PromptTemplate from langchain.schema import LLMResult, PromptValue @@ -34,6 +35,10 @@ class LLMChain(Chain): llm = LLMChain(llm=OpenAI(), prompt=prompt) """ + @property + def lc_serializable(self) -> bool: + return True + prompt: BasePromptTemplate """Prompt object to use.""" llm: BaseLanguageModel @@ -147,7 +152,7 @@ def apply( callbacks, self.callbacks, self.verbose ) run_manager = callback_manager.on_chain_start( - {"name": self.__class__.__name__}, + dumpd(self), {"input_list": input_list}, ) try: @@ -167,7 +172,7 @@ async def aapply( callbacks, self.callbacks, self.verbose ) run_manager = await callback_manager.on_chain_start( - {"name": self.__class__.__name__}, + dumpd(self), {"input_list": input_list}, ) try: diff --git a/langchain/chat_models/base.py b/langchain/chat_models/base.py index 05c1e8d55d279..f3521df6fb17d 100644 --- a/langchain/chat_models/base.py +++ b/langchain/chat_models/base.py @@ -17,6 +17,7 @@ CallbackManagerForLLMRun, Callbacks, ) +from langchain.load.dump import dumpd from langchain.schema import ( AIMessage, BaseMessage, @@ -70,12 +71,13 @@ def generate( params = self.dict() params["stop"] = stop + options = {"stop": stop} callback_manager = CallbackManager.configure( callbacks, self.callbacks, self.verbose ) run_manager = callback_manager.on_chat_model_start( - {"name": self.__class__.__name__}, messages, invocation_params=params + dumpd(self), messages, invocation_params=params, options=options ) new_arg_supported = inspect.signature(self._generate).parameters.get( @@ -109,12 +111,13 @@ async def agenerate( """Top Level call""" params = self.dict() params["stop"] = stop + options = {"stop": stop} callback_manager = AsyncCallbackManager.configure( callbacks, self.callbacks, self.verbose ) run_manager = await callback_manager.on_chat_model_start( - {"name": self.__class__.__name__}, messages, invocation_params=params + dumpd(self), messages, invocation_params=params, options=options ) new_arg_supported = inspect.signature(self._agenerate).parameters.get( diff --git a/langchain/chat_models/openai.py b/langchain/chat_models/openai.py index d7c5183260331..b1dcb9de27774 100644 --- a/langchain/chat_models/openai.py +++ b/langchain/chat_models/openai.py @@ -136,6 +136,10 @@ class ChatOpenAI(BaseChatModel): openai = ChatOpenAI(model_name="gpt-3.5-turbo") """ + @property + def lc_serializable(self) -> bool: + return True + client: Any #: :meta private: model_name: str = Field(default="gpt-3.5-turbo", alias="model") """Model name to use.""" diff --git a/langchain/llms/base.py b/langchain/llms/base.py index 866bdada53171..9b06514236557 100644 --- a/langchain/llms/base.py +++ b/langchain/llms/base.py @@ -19,6 +19,7 @@ CallbackManagerForLLMRun, Callbacks, ) +from langchain.load.dump import dumpd from langchain.schema import ( AIMessage, BaseMessage, @@ -166,6 +167,7 @@ def generate( ) params = self.dict() params["stop"] = stop + options = {"stop": stop} ( existing_prompts, llm_string, @@ -186,7 +188,7 @@ def generate( "Asked to cache, but no cache found at `langchain.cache`." 
) run_manager = callback_manager.on_llm_start( - {"name": self.__class__.__name__}, prompts, invocation_params=params + dumpd(self), prompts, invocation_params=params, options=options ) try: output = ( @@ -205,9 +207,10 @@ def generate( return output if len(missing_prompts) > 0: run_manager = callback_manager.on_llm_start( - {"name": self.__class__.__name__}, + dumpd(self), missing_prompts, invocation_params=params, + options=options, ) try: new_results = ( @@ -243,6 +246,7 @@ async def agenerate( """Run the LLM on the given prompt and input.""" params = self.dict() params["stop"] = stop + options = {"stop": stop} ( existing_prompts, llm_string, @@ -263,7 +267,7 @@ async def agenerate( "Asked to cache, but no cache found at `langchain.cache`." ) run_manager = await callback_manager.on_llm_start( - {"name": self.__class__.__name__}, prompts, invocation_params=params + dumpd(self), prompts, invocation_params=params, options=options ) try: output = ( @@ -282,9 +286,10 @@ async def agenerate( return output if len(missing_prompts) > 0: run_manager = await callback_manager.on_llm_start( - {"name": self.__class__.__name__}, + dumpd(self), missing_prompts, invocation_params=params, + options=options, ) try: new_results = ( diff --git a/langchain/llms/openai.py b/langchain/llms/openai.py index bb1c021283a47..172697a324eaa 100644 --- a/langchain/llms/openai.py +++ b/langchain/llms/openai.py @@ -123,6 +123,14 @@ async def _completion_with_retry(**kwargs: Any) -> Any: class BaseOpenAI(BaseLLM): """Wrapper around OpenAI large language models.""" + @property + def lc_secrets(self) -> Dict[str, str]: + return {"openai_api_key": "OPENAI_API_KEY"} + + @property + def lc_serializable(self) -> bool: + return True + client: Any #: :meta private: model_name: str = Field("text-davinci-003", alias="model") """Model name to use.""" diff --git a/langchain/load/__init__.py b/langchain/load/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/langchain/load/dump.py b/langchain/load/dump.py new file mode 100644 index 0000000000000..d59fb0b2c38d8 --- /dev/null +++ b/langchain/load/dump.py @@ -0,0 +1,22 @@ +import json +from typing import Any, Dict + +from langchain.load.serializable import Serializable, to_json_not_implemented + + +def default(obj: Any) -> Any: + if isinstance(obj, Serializable): + return obj.to_json() + else: + return to_json_not_implemented(obj) + + +def dumps(obj: Any, *, pretty: bool = False) -> str: + if pretty: + return json.dumps(obj, default=default, indent=2) + else: + return json.dumps(obj, default=default) + + +def dumpd(obj: Any) -> Dict[str, Any]: + return json.loads(dumps(obj)) diff --git a/langchain/load/load.py b/langchain/load/load.py new file mode 100644 index 0000000000000..3cac560a9ae28 --- /dev/null +++ b/langchain/load/load.py @@ -0,0 +1,65 @@ +import importlib +import json +from typing import Any, Dict, Optional + +from langchain.load.serializable import Serializable + + +class Reviver: + def __init__(self, secrets_map: Optional[Dict[str, str]] = None) -> None: + self.secrets_map = secrets_map or dict() + + def __call__(self, value: Dict[str, Any]) -> Any: + if ( + value.get("lc", None) == 1 + and value.get("type", None) == "secret" + and value.get("id", None) is not None + ): + [key] = value["id"] + if key in self.secrets_map: + return self.secrets_map[key] + else: + raise KeyError(f'Missing key "{key}" in load(secrets_map)') + + if ( + value.get("lc", None) == 1 + and value.get("type", None) == "not_implemented" + and value.get("id", None) is not 
None + ): + raise NotImplementedError( + "Trying to load an object that doesn't implement " + f"serialization: {value}" + ) + + if ( + value.get("lc", None) == 1 + and value.get("type", None) == "constructor" + and value.get("id", None) is not None + ): + [*namespace, name] = value["id"] + + # Currently, we only support langchain imports. + if namespace[0] != "langchain": + raise ValueError(f"Invalid namespace: {value}") + + # The root namespace "langchain" is not a valid identifier. + if len(namespace) == 1: + raise ValueError(f"Invalid namespace: {value}") + + mod = importlib.import_module(".".join(namespace)) + cls = getattr(mod, name) + + # The class must be a subclass of Serializable. + if not issubclass(cls, Serializable): + raise ValueError(f"Invalid namespace: {value}") + + # We don't need to recurse on kwargs + # as json.loads will do that for us. + kwargs = value.get("kwargs", dict()) + return cls(**kwargs) + + return value + + +def loads(text: str, *, secrets_map: Optional[Dict[str, str]] = None) -> Any: + return json.loads(text, object_hook=Reviver(secrets_map)) diff --git a/langchain/load/serializable.py b/langchain/load/serializable.py new file mode 100644 index 0000000000000..9c8c60bfe9313 --- /dev/null +++ b/langchain/load/serializable.py @@ -0,0 +1,135 @@ +from abc import ABC +from typing import Any, Dict, List, Literal, TypedDict, Union, cast + +from pydantic import BaseModel, Field + + +class BaseSerialized(TypedDict): + lc: int + id: List[str] + + +class SerializedConstructor(BaseSerialized): + type: Literal["constructor"] + kwargs: Dict[str, Any] + + +class SerializedSecret(BaseSerialized): + type: Literal["secret"] + + +class SerializedNotImplemented(BaseSerialized): + type: Literal["not_implemented"] + + +class Serializable(BaseModel, ABC): + @property + def lc_serializable(self) -> bool: + """ + Return whether or not the class is serializable. + """ + return False + + @property + def lc_namespace(self) -> List[str]: + """ + Return the namespace of the langchain object. + eg. ["langchain", "llms", "openai"] + """ + return self.__class__.__module__.split(".") + + @property + def lc_secrets(self) -> Dict[str, str]: + """ + Return a map of constructor argument names to secret ids. + eg. {"openai_api_key": "OPENAI_API_KEY"} + """ + return dict() + + @property + def lc_attributes(self) -> Dict: + """ + Return a list of attribute names that should be included in the + serialized kwargs. These attributes must be accepted by the + constructor. 
+ """ + return {} + + lc_kwargs: Dict[str, Any] = Field(default_factory=dict, exclude=True) + + def __init__(self, **kwargs: Any) -> None: + super().__init__(**kwargs) + self.lc_kwargs = kwargs + + def to_json(self) -> Union[SerializedConstructor, SerializedNotImplemented]: + if not self.lc_serializable: + return self.to_json_not_implemented() + + secrets = dict() + # Get latest values for kwargs if there is an attribute with same name + lc_kwargs = { + k: getattr(self, k, v) + for k, v in self.lc_kwargs.items() + if not self.__exclude_fields__.get(k, False) # type: ignore + } + + # Merge the lc_secrets and lc_attributes from every class in the MRO + for cls in [None, *self.__class__.mro()]: + # Once we get to Serializable, we're done + if cls is Serializable: + break + + # Get a reference to self bound to each class in the MRO + this = cast(Serializable, self if cls is None else super(cls, self)) + + secrets.update(this.lc_secrets) + lc_kwargs.update(this.lc_attributes) + + return { + "lc": 1, + "type": "constructor", + "id": [*self.lc_namespace, self.__class__.__name__], + "kwargs": lc_kwargs + if not secrets + else _replace_secrets(lc_kwargs, secrets), + } + + def to_json_not_implemented(self) -> SerializedNotImplemented: + return to_json_not_implemented(self) + + +def _replace_secrets( + root: Dict[Any, Any], secrets_map: Dict[str, str] +) -> Dict[Any, Any]: + result = root.copy() + for path, secret_id in secrets_map.items(): + [*parts, last] = path.split(".") + current = result + for part in parts: + if part not in current: + break + current[part] = current[part].copy() + current = current[part] + if last in current: + current[last] = { + "lc": 1, + "type": "secret", + "id": [secret_id], + } + return result + + +def to_json_not_implemented(obj: object) -> SerializedNotImplemented: + _id: List[str] = [] + try: + if hasattr(obj, "__name__"): + _id = [*obj.__module__.split("."), obj.__name__] + elif hasattr(obj, "__class__"): + _id = [*obj.__class__.__module__.split("."), obj.__class__.__name__] + except Exception: + pass + return { + "lc": 1, + "type": "not_implemented", + "id": _id, + } diff --git a/langchain/prompts/base.py b/langchain/prompts/base.py index 8d31b10ead780..58e7339b8a5eb 100644 --- a/langchain/prompts/base.py +++ b/langchain/prompts/base.py @@ -7,9 +7,10 @@ from typing import Any, Callable, Dict, List, Mapping, Optional, Set, Union import yaml -from pydantic import BaseModel, Extra, Field, root_validator +from pydantic import Extra, Field, root_validator from langchain.formatting import formatter +from langchain.load.serializable import Serializable from langchain.schema import BaseMessage, BaseOutputParser, HumanMessage, PromptValue @@ -100,7 +101,7 @@ def to_messages(self) -> List[BaseMessage]: return [HumanMessage(content=self.text)] -class BasePromptTemplate(BaseModel, ABC): +class BasePromptTemplate(Serializable, ABC): """Base class for all prompt templates, returning a prompt.""" input_variables: List[str] @@ -111,6 +112,10 @@ class BasePromptTemplate(BaseModel, ABC): default_factory=dict ) + @property + def lc_serializable(self) -> bool: + return True + class Config: """Configuration for this pydantic object.""" diff --git a/langchain/prompts/chat.py b/langchain/prompts/chat.py index 89fb10b091190..1edc8f6ca8dea 100644 --- a/langchain/prompts/chat.py +++ b/langchain/prompts/chat.py @@ -5,8 +5,9 @@ from pathlib import Path from typing import Any, Callable, List, Sequence, Tuple, Type, TypeVar, Union -from pydantic import BaseModel, Field +from pydantic import 
Field +from langchain.load.serializable import Serializable from langchain.memory.buffer import get_buffer_string from langchain.prompts.base import BasePromptTemplate, StringPromptTemplate from langchain.prompts.prompt import PromptTemplate @@ -20,7 +21,11 @@ ) -class BaseMessagePromptTemplate(BaseModel, ABC): +class BaseMessagePromptTemplate(Serializable, ABC): + @property + def lc_serializable(self) -> bool: + return True + @abstractmethod def format_messages(self, **kwargs: Any) -> List[BaseMessage]: """To messages.""" @@ -220,7 +225,7 @@ def partial(self, **kwargs: Union[str, Callable[[], str]]) -> BasePromptTemplate @property def _prompt_type(self) -> str: - raise NotImplementedError + return "chat" def save(self, file_path: Union[Path, str]) -> None: raise NotImplementedError diff --git a/langchain/prompts/few_shot.py b/langchain/prompts/few_shot.py index e17c53548c928..9012295385cdc 100644 --- a/langchain/prompts/few_shot.py +++ b/langchain/prompts/few_shot.py @@ -15,6 +15,10 @@ class FewShotPromptTemplate(StringPromptTemplate): """Prompt template that contains few shot examples.""" + @property + def lc_serializable(self) -> bool: + return False + examples: Optional[List[dict]] = None """Examples to format into the prompt. Either this or example_selector should be provided.""" diff --git a/langchain/prompts/prompt.py b/langchain/prompts/prompt.py index 31f87d43b933d..c8ac2200b6d53 100644 --- a/langchain/prompts/prompt.py +++ b/langchain/prompts/prompt.py @@ -25,6 +25,12 @@ class PromptTemplate(StringPromptTemplate): prompt = PromptTemplate(input_variables=["foo"], template="Say {foo}") """ + @property + def lc_attributes(self) -> Dict[str, Any]: + return { + "template_format": self.template_format, + } + input_variables: List[str] """A list of the names of the variables the prompt template expects.""" diff --git a/langchain/schema.py b/langchain/schema.py index b74b40a7c5e42..b2f76e705d3d2 100644 --- a/langchain/schema.py +++ b/langchain/schema.py @@ -17,6 +17,8 @@ from pydantic import BaseModel, Extra, Field, root_validator +from langchain.load.serializable import Serializable + RUN_KEY = "__run" @@ -55,7 +57,7 @@ class AgentFinish(NamedTuple): log: str -class Generation(BaseModel): +class Generation(Serializable): """Output of a single generation.""" text: str @@ -67,7 +69,7 @@ class Generation(BaseModel): # TODO: add log probs -class BaseMessage(BaseModel): +class BaseMessage(Serializable): """Message object.""" content: str @@ -194,7 +196,7 @@ def __eq__(self, other: object) -> bool: ) -class PromptValue(BaseModel, ABC): +class PromptValue(Serializable, ABC): @abstractmethod def to_string(self) -> str: """Return prompt as string.""" @@ -204,7 +206,7 @@ def to_messages(self) -> List[BaseMessage]: """Return prompt as messages.""" -class BaseMemory(BaseModel, ABC): +class BaseMemory(Serializable, ABC): """Base interface for memory in chains.""" class Config: @@ -282,7 +284,7 @@ def clear(self) -> None: """Remove all messages from the store""" -class Document(BaseModel): +class Document(Serializable): """Interface for interacting with a document.""" page_content: str @@ -321,7 +323,7 @@ async def aget_relevant_documents(self, query: str) -> List[Document]: T = TypeVar("T") -class BaseOutputParser(BaseModel, ABC, Generic[T]): +class BaseOutputParser(Serializable, ABC, Generic[T]): """Class to parse the output of an LLM call. Output parsers help structure language model responses. 
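Taken together, the new `Serializable` base class and the `dumps`/`loads` helpers introduced above give chains a JSON round-trip in which secrets are replaced by named placeholders. A minimal sketch of the intended usage, mirroring the unit tests added later in this patch (the model name and API key are dummy placeholder values, and `openai` must be installed for the constructor to validate):

```python
from langchain.chains.llm import LLMChain
from langchain.llms.openai import OpenAI
from langchain.load.dump import dumps
from langchain.load.load import loads
from langchain.prompts.prompt import PromptTemplate

# Build a simple chain; "hello" is a dummy API key used only for the example.
llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello")
chain = LLMChain(llm=llm, prompt=PromptTemplate.from_template("hello {name}!"))

# dumps() emits JSON in which the key appears only as
# {"lc": 1, "type": "secret", "id": ["OPENAI_API_KEY"]}.
chain_string = dumps(chain, pretty=True)

# loads() reconstructs the chain; secrets are re-injected via secrets_map.
chain2 = loads(chain_string, secrets_map={"OPENAI_API_KEY": "hello"})
assert chain2 == chain
```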
diff --git a/poetry.lock b/poetry.lock index 03242e43c1d9b..9ed0db756c9e3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1417,6 +1417,17 @@ files = [ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +[[package]] +name = "colored" +version = "1.4.4" +description = "Simple library for color and formatting to terminal" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "colored-1.4.4.tar.gz", hash = "sha256:04ff4d4dd514274fe3b99a21bb52fb96f2688c01e93fba7bef37221e7cb56ce0"}, +] + [[package]] name = "coloredlogs" version = "15.0.1" @@ -9461,6 +9472,22 @@ files = [ [package.dependencies] mpmath = ">=0.19" +[[package]] +name = "syrupy" +version = "4.0.2" +description = "Pytest Snapshot Test Utility" +category = "dev" +optional = false +python-versions = ">=3.8.1,<4" +files = [ + {file = "syrupy-4.0.2-py3-none-any.whl", hash = "sha256:dfd1f0fad298eee753de4f2471d4346412c4435885c4b7beea648d4934c6620a"}, + {file = "syrupy-4.0.2.tar.gz", hash = "sha256:3c75ab6866580679b2cb9abe78e74c3e2011fffc6333651c6beb2a78a716ab80"}, +] + +[package.dependencies] +colored = ">=1.3.92,<2.0.0" +pytest = ">=7.0.0,<8.0.0" + [[package]] name = "tabulate" version = "0.9.0" @@ -11428,7 +11455,7 @@ azure = ["azure-identity", "azure-cosmos", "openai", "azure-core", "azure-ai-for cohere = ["cohere"] docarray = ["docarray"] embeddings = ["sentence-transformers"] -extended-testing = ["beautifulsoup4", "bibtexparser", "chardet", "jq", "pdfminer-six", "pypdf", "pymupdf", "pypdfium2", "tqdm", "lxml", "atlassian-python-api", "beautifulsoup4", "pandas", "telethon", "psychicapi", "zep-python", "gql", "requests-toolbelt", "html2text", "py-trello", "scikit-learn", "pyspark"] +extended-testing = ["beautifulsoup4", "bibtexparser", "chardet", "jq", "pdfminer-six", "pypdf", "pymupdf", "pypdfium2", "tqdm", "lxml", "atlassian-python-api", "beautifulsoup4", "pandas", "telethon", "psychicapi", "zep-python", "gql", "requests-toolbelt", "html2text", "py-trello", "scikit-learn", "pyspark", "openai"] llms = ["anthropic", "cohere", "openai", "openlm", "nlpcloud", "huggingface_hub", "manifest-ml", "torch", "transformers"] openai = ["openai", "tiktoken"] qdrant = ["qdrant-client"] @@ -11437,4 +11464,4 @@ text-helpers = ["chardet"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "ecf7086e83cc0ff19e6851c0b63170b082b267c1c1c00f47700fd3a8c8bb46c5" +content-hash = "7a39130af070d4a4fe6b0af5d6b70615c868ab0b1867e404060ff00eacd10f5f" diff --git a/pyproject.toml b/pyproject.toml index d8fbca994fadb..fc6edb5b8a962 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -139,6 +139,7 @@ pytest-asyncio = "^0.20.3" lark = "^1.1.5" pytest-mock = "^3.10.0" pytest-socket = "^0.6.0" +syrupy = "^4.0.2" [tool.poetry.group.test_integration] optional = true @@ -315,7 +316,8 @@ extended_testing = [ "html2text", "py-trello", "scikit-learn", - "pyspark" + "pyspark", + "openai" ] [tool.ruff] @@ -349,7 +351,10 @@ build-backend = "poetry.core.masonry.api" # https://docs.pytest.org/en/7.1.x/reference/reference.html # --strict-config any warnings encountered while parsing the `pytest` # section of the configuration file raise errors. -addopts = "--strict-markers --strict-config --durations=5" +# +# https://github.com/tophat/syrupy +# --snapshot-warn-unused Prints a warning on unused snapshots rather than fail the test suite. +addopts = "--strict-markers --strict-config --durations=5 --snapshot-warn-unused" # Registering custom markers. 
# https://docs.pytest.org/en/7.1.x/example/markers.html#registering-markers markers = [ diff --git a/tests/unit_tests/callbacks/tracers/test_base_tracer.py b/tests/unit_tests/callbacks/tracers/test_base_tracer.py index 4ff2e342c5fd7..c0736c62eb693 100644 --- a/tests/unit_tests/callbacks/tracers/test_base_tracer.py +++ b/tests/unit_tests/callbacks/tracers/test_base_tracer.py @@ -13,6 +13,9 @@ from langchain.callbacks.tracers.schemas import Run from langchain.schema import LLMResult +SERIALIZED = {"id": ["llm"]} +SERIALIZED_CHAT = {"id": ["chat_model"]} + class FakeTracer(BaseTracer): """Fake tracer that records LangChain execution.""" @@ -39,7 +42,7 @@ def test_tracer_llm_run() -> None: extra={}, execution_order=1, child_execution_order=1, - serialized={"name": "llm"}, + serialized=SERIALIZED, inputs={"prompts": []}, outputs=LLMResult(generations=[[]]), error=None, @@ -47,7 +50,7 @@ def test_tracer_llm_run() -> None: ) tracer = FakeTracer() - tracer.on_llm_start(serialized={"name": "llm"}, prompts=[], run_id=uuid) + tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid) tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid) assert tracer.runs == [compare_run] @@ -64,7 +67,7 @@ def test_tracer_chat_model_run() -> None: extra={}, execution_order=1, child_execution_order=1, - serialized={"name": "chat_model"}, + serialized=SERIALIZED_CHAT, inputs=dict(prompts=[""]), outputs=LLMResult(generations=[[]]), error=None, @@ -73,7 +76,7 @@ def test_tracer_chat_model_run() -> None: tracer = FakeTracer() manager = CallbackManager(handlers=[tracer]) run_manager = manager.on_chat_model_start( - serialized={"name": "chat_model"}, messages=[[]], run_id=uuid + serialized=SERIALIZED_CHAT, messages=[[]], run_id=uuid ) run_manager.on_llm_end(response=LLMResult(generations=[[]])) assert tracer.runs == [compare_run] @@ -100,7 +103,7 @@ def test_tracer_multiple_llm_runs() -> None: extra={}, execution_order=1, child_execution_order=1, - serialized={"name": "llm"}, + serialized=SERIALIZED, inputs=dict(prompts=[]), outputs=LLMResult(generations=[[]]), error=None, @@ -110,7 +113,7 @@ def test_tracer_multiple_llm_runs() -> None: num_runs = 10 for _ in range(num_runs): - tracer.on_llm_start(serialized={"name": "llm"}, prompts=[], run_id=uuid) + tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid) tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid) assert tracer.runs == [compare_run] * num_runs @@ -183,7 +186,7 @@ def test_tracer_nested_run() -> None: parent_run_id=chain_uuid, ) tracer.on_llm_start( - serialized={"name": "llm"}, + serialized=SERIALIZED, prompts=[], run_id=llm_uuid1, parent_run_id=tool_uuid, @@ -191,7 +194,7 @@ def test_tracer_nested_run() -> None: tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid1) tracer.on_tool_end("test", run_id=tool_uuid) tracer.on_llm_start( - serialized={"name": "llm"}, + serialized=SERIALIZED, prompts=[], run_id=llm_uuid2, parent_run_id=chain_uuid, @@ -235,7 +238,7 @@ def test_tracer_nested_run() -> None: extra={}, execution_order=3, child_execution_order=3, - serialized={"name": "llm"}, + serialized=SERIALIZED, inputs=dict(prompts=[]), outputs=LLMResult(generations=[[]]), run_type="llm", @@ -251,7 +254,7 @@ def test_tracer_nested_run() -> None: extra={}, execution_order=4, child_execution_order=4, - serialized={"name": "llm"}, + serialized=SERIALIZED, inputs=dict(prompts=[]), outputs=LLMResult(generations=[[]]), run_type="llm", @@ -275,7 +278,7 @@ def test_tracer_llm_run_on_error() -> None: extra={}, 
execution_order=1, child_execution_order=1, - serialized={"name": "llm"}, + serialized=SERIALIZED, inputs=dict(prompts=[]), outputs=None, error=repr(exception), @@ -283,7 +286,7 @@ def test_tracer_llm_run_on_error() -> None: ) tracer = FakeTracer() - tracer.on_llm_start(serialized={"name": "llm"}, prompts=[], run_id=uuid) + tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid) tracer.on_llm_error(exception, run_id=uuid) assert tracer.runs == [compare_run] @@ -358,14 +361,14 @@ def test_tracer_nested_runs_on_error() -> None: serialized={"name": "chain"}, inputs={}, run_id=chain_uuid ) tracer.on_llm_start( - serialized={"name": "llm"}, + serialized=SERIALIZED, prompts=[], run_id=llm_uuid1, parent_run_id=chain_uuid, ) tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid1) tracer.on_llm_start( - serialized={"name": "llm"}, + serialized=SERIALIZED, prompts=[], run_id=llm_uuid2, parent_run_id=chain_uuid, @@ -378,7 +381,7 @@ def test_tracer_nested_runs_on_error() -> None: parent_run_id=chain_uuid, ) tracer.on_llm_start( - serialized={"name": "llm"}, + serialized=SERIALIZED, prompts=[], run_id=llm_uuid3, parent_run_id=tool_uuid, @@ -408,7 +411,7 @@ def test_tracer_nested_runs_on_error() -> None: extra={}, execution_order=2, child_execution_order=2, - serialized={"name": "llm"}, + serialized=SERIALIZED, error=None, inputs=dict(prompts=[]), outputs=LLMResult(generations=[[]], llm_output=None), @@ -422,7 +425,7 @@ def test_tracer_nested_runs_on_error() -> None: extra={}, execution_order=3, child_execution_order=3, - serialized={"name": "llm"}, + serialized=SERIALIZED, error=None, inputs=dict(prompts=[]), outputs=LLMResult(generations=[[]], llm_output=None), @@ -450,7 +453,7 @@ def test_tracer_nested_runs_on_error() -> None: extra={}, execution_order=5, child_execution_order=5, - serialized={"name": "llm"}, + serialized=SERIALIZED, error=repr(exception), inputs=dict(prompts=[]), outputs=None, diff --git a/tests/unit_tests/callbacks/tracers/test_langchain_v1.py b/tests/unit_tests/callbacks/tracers/test_langchain_v1.py index ab655ac631242..782f3fbcfe7ae 100644 --- a/tests/unit_tests/callbacks/tracers/test_langchain_v1.py +++ b/tests/unit_tests/callbacks/tracers/test_langchain_v1.py @@ -22,6 +22,9 @@ TEST_SESSION_ID = 2023 +SERIALIZED = {"id": ["llm"]} +SERIALIZED_CHAT = {"id": ["chat_model"]} + def load_session(session_name: str) -> TracerSessionV1: """Load a tracing session.""" @@ -107,7 +110,7 @@ def test_tracer_llm_run() -> None: extra={}, execution_order=1, child_execution_order=1, - serialized={"name": "llm"}, + serialized=SERIALIZED, prompts=[], response=LLMResult(generations=[[]]), session_id=TEST_SESSION_ID, @@ -116,7 +119,7 @@ def test_tracer_llm_run() -> None: tracer = FakeTracer() tracer.new_session() - tracer.on_llm_start(serialized={"name": "llm"}, prompts=[], run_id=uuid) + tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid) tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid) assert tracer.runs == [compare_run] @@ -133,7 +136,7 @@ def test_tracer_chat_model_run() -> None: extra={}, execution_order=1, child_execution_order=1, - serialized={"name": "chat_model"}, + serialized=SERIALIZED_CHAT, prompts=[""], response=LLMResult(generations=[[]]), session_id=TEST_SESSION_ID, @@ -144,7 +147,7 @@ def test_tracer_chat_model_run() -> None: tracer.new_session() manager = CallbackManager(handlers=[tracer]) run_manager = manager.on_chat_model_start( - serialized={"name": "chat_model"}, messages=[[]], run_id=uuid + 
serialized=SERIALIZED_CHAT, messages=[[]], run_id=uuid ) run_manager.on_llm_end(response=LLMResult(generations=[[]])) assert tracer.runs == [compare_run] @@ -172,7 +175,7 @@ def test_tracer_multiple_llm_runs() -> None: extra={}, execution_order=1, child_execution_order=1, - serialized={"name": "llm"}, + serialized=SERIALIZED, prompts=[], response=LLMResult(generations=[[]]), session_id=TEST_SESSION_ID, @@ -183,7 +186,7 @@ def test_tracer_multiple_llm_runs() -> None: tracer.new_session() num_runs = 10 for _ in range(num_runs): - tracer.on_llm_start(serialized={"name": "llm"}, prompts=[], run_id=uuid) + tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid) tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=uuid) assert tracer.runs == [compare_run] * num_runs @@ -263,7 +266,7 @@ def test_tracer_nested_run() -> None: parent_run_id=chain_uuid, ) tracer.on_llm_start( - serialized={"name": "llm"}, + serialized=SERIALIZED, prompts=[], run_id=llm_uuid1, parent_run_id=tool_uuid, @@ -271,7 +274,7 @@ def test_tracer_nested_run() -> None: tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid1) tracer.on_tool_end("test", run_id=tool_uuid) tracer.on_llm_start( - serialized={"name": "llm"}, + serialized=SERIALIZED, prompts=[], run_id=llm_uuid2, parent_run_id=chain_uuid, @@ -319,7 +322,7 @@ def test_tracer_nested_run() -> None: extra={}, execution_order=3, child_execution_order=3, - serialized={"name": "llm"}, + serialized=SERIALIZED, prompts=[], response=LLMResult(generations=[[]]), session_id=TEST_SESSION_ID, @@ -337,7 +340,7 @@ def test_tracer_nested_run() -> None: extra={}, execution_order=4, child_execution_order=4, - serialized={"name": "llm"}, + serialized=SERIALIZED, prompts=[], response=LLMResult(generations=[[]]), session_id=TEST_SESSION_ID, @@ -362,7 +365,7 @@ def test_tracer_llm_run_on_error() -> None: extra={}, execution_order=1, child_execution_order=1, - serialized={"name": "llm"}, + serialized=SERIALIZED, prompts=[], response=None, session_id=TEST_SESSION_ID, @@ -371,7 +374,7 @@ def test_tracer_llm_run_on_error() -> None: tracer = FakeTracer() tracer.new_session() - tracer.on_llm_start(serialized={"name": "llm"}, prompts=[], run_id=uuid) + tracer.on_llm_start(serialized=SERIALIZED, prompts=[], run_id=uuid) tracer.on_llm_error(exception, run_id=uuid) assert tracer.runs == [compare_run] @@ -451,14 +454,14 @@ def test_tracer_nested_runs_on_error() -> None: serialized={"name": "chain"}, inputs={}, run_id=chain_uuid ) tracer.on_llm_start( - serialized={"name": "llm"}, + serialized=SERIALIZED, prompts=[], run_id=llm_uuid1, parent_run_id=chain_uuid, ) tracer.on_llm_end(response=LLMResult(generations=[[]]), run_id=llm_uuid1) tracer.on_llm_start( - serialized={"name": "llm"}, + serialized=SERIALIZED, prompts=[], run_id=llm_uuid2, parent_run_id=chain_uuid, @@ -471,7 +474,7 @@ def test_tracer_nested_runs_on_error() -> None: parent_run_id=chain_uuid, ) tracer.on_llm_start( - serialized={"name": "llm"}, + serialized=SERIALIZED, prompts=[], run_id=llm_uuid3, parent_run_id=tool_uuid, @@ -501,7 +504,7 @@ def test_tracer_nested_runs_on_error() -> None: extra={}, execution_order=2, child_execution_order=2, - serialized={"name": "llm"}, + serialized=SERIALIZED, session_id=TEST_SESSION_ID, error=None, prompts=[], @@ -515,7 +518,7 @@ def test_tracer_nested_runs_on_error() -> None: extra={}, execution_order=3, child_execution_order=3, - serialized={"name": "llm"}, + serialized=SERIALIZED, session_id=TEST_SESSION_ID, error=None, prompts=[], @@ -547,7 +550,7 @@ def 
test_tracer_nested_runs_on_error() -> None: extra={}, execution_order=5, child_execution_order=5, - serialized={"name": "llm"}, + serialized=SERIALIZED, session_id=TEST_SESSION_ID, error=repr(exception), prompts=[], diff --git a/tests/unit_tests/load/__snapshots__/test_dump.ambr b/tests/unit_tests/load/__snapshots__/test_dump.ambr new file mode 100644 index 0000000000000..e9f75fbafb891 --- /dev/null +++ b/tests/unit_tests/load/__snapshots__/test_dump.ambr @@ -0,0 +1,273 @@ +# serializer version: 1 +# name: test_person + ''' + { + "lc": 1, + "type": "constructor", + "id": [ + "test_dump", + "Person" + ], + "kwargs": { + "secret": { + "lc": 1, + "type": "secret", + "id": [ + "SECRET" + ] + }, + "you_can_see_me": "hello" + } + } + ''' +# --- +# name: test_person.1 + ''' + { + "lc": 1, + "type": "constructor", + "id": [ + "test_dump", + "SpecialPerson" + ], + "kwargs": { + "another_secret": { + "lc": 1, + "type": "secret", + "id": [ + "ANOTHER_SECRET" + ] + }, + "secret": { + "lc": 1, + "type": "secret", + "id": [ + "SECRET" + ] + }, + "another_visible": "bye", + "you_can_see_me": "hello" + } + } + ''' +# --- +# name: test_serialize_llmchain + ''' + { + "lc": 1, + "type": "constructor", + "id": [ + "langchain", + "chains", + "llm", + "LLMChain" + ], + "kwargs": { + "llm": { + "lc": 1, + "type": "constructor", + "id": [ + "langchain", + "llms", + "openai", + "OpenAI" + ], + "kwargs": { + "model": "davinci", + "temperature": 0.5, + "openai_api_key": { + "lc": 1, + "type": "secret", + "id": [ + "OPENAI_API_KEY" + ] + } + } + }, + "prompt": { + "lc": 1, + "type": "constructor", + "id": [ + "langchain", + "prompts", + "prompt", + "PromptTemplate" + ], + "kwargs": { + "input_variables": [ + "name" + ], + "template": "hello {name}!", + "template_format": "f-string" + } + } + } + } + ''' +# --- +# name: test_serialize_llmchain_chat + ''' + { + "lc": 1, + "type": "constructor", + "id": [ + "langchain", + "chains", + "llm", + "LLMChain" + ], + "kwargs": { + "llm": { + "lc": 1, + "type": "constructor", + "id": [ + "langchain", + "chat_models", + "openai", + "ChatOpenAI" + ], + "kwargs": { + "model": "davinci", + "temperature": 0.5, + "openai_api_key": "hello" + } + }, + "prompt": { + "lc": 1, + "type": "constructor", + "id": [ + "langchain", + "prompts", + "chat", + "ChatPromptTemplate" + ], + "kwargs": { + "input_variables": [ + "name" + ], + "messages": [ + { + "lc": 1, + "type": "constructor", + "id": [ + "langchain", + "prompts", + "chat", + "HumanMessagePromptTemplate" + ], + "kwargs": { + "prompt": { + "lc": 1, + "type": "constructor", + "id": [ + "langchain", + "prompts", + "prompt", + "PromptTemplate" + ], + "kwargs": { + "input_variables": [ + "name" + ], + "template": "hello {name}!", + "template_format": "f-string" + } + } + } + } + ] + } + } + } + } + ''' +# --- +# name: test_serialize_llmchain_with_non_serializable_arg + ''' + { + "lc": 1, + "type": "constructor", + "id": [ + "langchain", + "chains", + "llm", + "LLMChain" + ], + "kwargs": { + "llm": { + "lc": 1, + "type": "constructor", + "id": [ + "langchain", + "llms", + "openai", + "OpenAI" + ], + "kwargs": { + "model": "davinci", + "temperature": 0.5, + "openai_api_key": { + "lc": 1, + "type": "secret", + "id": [ + "OPENAI_API_KEY" + ] + }, + "client": { + "lc": 1, + "type": "not_implemented", + "id": [ + "openai", + "api_resources", + "completion", + "Completion" + ] + } + } + }, + "prompt": { + "lc": 1, + "type": "constructor", + "id": [ + "langchain", + "prompts", + "prompt", + "PromptTemplate" + ], + "kwargs": { + "input_variables": 
[ + "name" + ], + "template": "hello {name}!", + "template_format": "f-string" + } + } + } + } + ''' +# --- +# name: test_serialize_openai_llm + ''' + { + "lc": 1, + "type": "constructor", + "id": [ + "langchain", + "llms", + "openai", + "OpenAI" + ], + "kwargs": { + "model": "davinci", + "temperature": 0.7, + "openai_api_key": { + "lc": 1, + "type": "secret", + "id": [ + "OPENAI_API_KEY" + ] + } + } + } + ''' +# --- diff --git a/tests/unit_tests/load/test_dump.py b/tests/unit_tests/load/test_dump.py new file mode 100644 index 0000000000000..45eab8eb570f8 --- /dev/null +++ b/tests/unit_tests/load/test_dump.py @@ -0,0 +1,103 @@ +"""Test for Serializable base class""" + +from typing import Any, Dict + +import pytest + +from langchain.callbacks.tracers.langchain import LangChainTracer +from langchain.chains.llm import LLMChain +from langchain.chat_models.openai import ChatOpenAI +from langchain.llms.openai import OpenAI +from langchain.load.dump import dumps +from langchain.load.serializable import Serializable +from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate +from langchain.prompts.prompt import PromptTemplate + + +class Person(Serializable): + secret: str + + you_can_see_me: str = "hello" + + @property + def lc_serializable(self) -> bool: + return True + + @property + def lc_secrets(self) -> Dict[str, str]: + return {"secret": "SECRET"} + + @property + def lc_attributes(self) -> Dict[str, str]: + return {"you_can_see_me": self.you_can_see_me} + + +class SpecialPerson(Person): + another_secret: str + + another_visible: str = "bye" + + # Gets merged with parent class's secrets + @property + def lc_secrets(self) -> Dict[str, str]: + return {"another_secret": "ANOTHER_SECRET"} + + # Gets merged with parent class's attributes + @property + def lc_attributes(self) -> Dict[str, str]: + return {"another_visible": self.another_visible} + + +class NotSerializable: + pass + + +def test_person(snapshot: Any) -> None: + p = Person(secret="hello") + assert dumps(p, pretty=True) == snapshot + sp = SpecialPerson(another_secret="Wooo", secret="Hmm") + assert dumps(sp, pretty=True) == snapshot + + +@pytest.mark.requires("openai") +def test_serialize_openai_llm(snapshot: Any) -> None: + llm = OpenAI( + model="davinci", + temperature=0.5, + openai_api_key="hello", + # This is excluded from serialization + callbacks=[LangChainTracer()], + ) + llm.temperature = 0.7 # this is reflected in serialization + assert dumps(llm, pretty=True) == snapshot + + +@pytest.mark.requires("openai") +def test_serialize_llmchain(snapshot: Any) -> None: + llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") + prompt = PromptTemplate.from_template("hello {name}!") + chain = LLMChain(llm=llm, prompt=prompt) + assert dumps(chain, pretty=True) == snapshot + + +@pytest.mark.requires("openai") +def test_serialize_llmchain_chat(snapshot: Any) -> None: + llm = ChatOpenAI(model="davinci", temperature=0.5, openai_api_key="hello") + prompt = ChatPromptTemplate.from_messages( + [HumanMessagePromptTemplate.from_template("hello {name}!")] + ) + chain = LLMChain(llm=llm, prompt=prompt) + assert dumps(chain, pretty=True) == snapshot + + +@pytest.mark.requires("openai") +def test_serialize_llmchain_with_non_serializable_arg(snapshot: Any) -> None: + llm = OpenAI( + model="davinci", + temperature=0.5, + openai_api_key="hello", + client=NotSerializable, + ) + prompt = PromptTemplate.from_template("hello {name}!") + chain = LLMChain(llm=llm, prompt=prompt) + assert dumps(chain, pretty=True) == 
snapshot diff --git a/tests/unit_tests/load/test_load.py b/tests/unit_tests/load/test_load.py new file mode 100644 index 0000000000000..8062d49911eda --- /dev/null +++ b/tests/unit_tests/load/test_load.py @@ -0,0 +1,54 @@ +"""Test for Serializable base class""" + +import pytest + +from langchain.chains.llm import LLMChain +from langchain.llms.openai import OpenAI +from langchain.load.dump import dumps +from langchain.load.load import loads +from langchain.prompts.prompt import PromptTemplate + + +class NotSerializable: + pass + + +@pytest.mark.requires("openai") +def test_load_openai_llm() -> None: + llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") + llm_string = dumps(llm) + llm2 = loads(llm_string, secrets_map={"OPENAI_API_KEY": "hello"}) + + assert llm2 == llm + assert dumps(llm2) == llm_string + assert isinstance(llm2, OpenAI) + + +@pytest.mark.requires("openai") +def test_load_llmchain() -> None: + llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") + prompt = PromptTemplate.from_template("hello {name}!") + chain = LLMChain(llm=llm, prompt=prompt) + chain_string = dumps(chain) + chain2 = loads(chain_string, secrets_map={"OPENAI_API_KEY": "hello"}) + + assert chain2 == chain + assert dumps(chain2) == chain_string + assert isinstance(chain2, LLMChain) + assert isinstance(chain2.llm, OpenAI) + assert isinstance(chain2.prompt, PromptTemplate) + + +@pytest.mark.requires("openai") +def test_load_llmchain_with_non_serializable_arg() -> None: + llm = OpenAI( + model="davinci", + temperature=0.5, + openai_api_key="hello", + client=NotSerializable, + ) + prompt = PromptTemplate.from_template("hello {name}!") + chain = LLMChain(llm=llm, prompt=prompt) + chain_string = dumps(chain, pretty=True) + with pytest.raises(NotImplementedError): + loads(chain_string, secrets_map={"OPENAI_API_KEY": "hello"}) diff --git a/tests/unit_tests/test_dependencies.py b/tests/unit_tests/test_dependencies.py index 63342a25dc80f..16e930dcbaf6d 100644 --- a/tests/unit_tests/test_dependencies.py +++ b/tests/unit_tests/test_dependencies.py @@ -72,6 +72,7 @@ def test_test_group_dependencies(poetry_conf: Mapping[str, Any]) -> None: "pytest-socket", "pytest-watcher", "responses", + "syrupy", ] From a197acfcd37b7a0cabdf26a3584ffce3f7ddd9b0 Mon Sep 17 00:00:00 2001 From: Zander Chase <130414180+vowelparrot@users.noreply.github.com> Date: Sun, 11 Jun 2023 17:59:09 -0700 Subject: [PATCH 31/46] Update check (#6020) We were assigning the name as None in on_chat_model_start then not updating, resulting in a validation error. 
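For illustration, a hypothetical `values` dict shows why the old membership check let an explicit `None` slip through while the new check catches it:

```python
# Hypothetical validator input, for illustration only.
values = {"name": None, "serialized": {"id": ["llm"]}}

# Old check: False, so the fallback never ran and name stayed None.
"name" not in values

# New check: True, so the fallback assigns name from serialized["id"][-1].
values.get("name") is None
```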
--- langchain/callbacks/tracers/langchain.py | 1 -
 langchain/callbacks/tracers/schemas.py   | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/langchain/callbacks/tracers/langchain.py b/langchain/callbacks/tracers/langchain.py
index de89f3aaf027e..9f734e2983b5e 100644
--- a/langchain/callbacks/tracers/langchain.py
+++ b/langchain/callbacks/tracers/langchain.py
@@ -53,7 +53,6 @@ def on_chat_model_start(
         execution_order = self._get_execution_order(parent_run_id_)
         chat_model_run = Run(
             id=run_id,
-            name=serialized.get("name"),
             parent_run_id=parent_run_id,
             serialized=serialized,
             inputs={"messages": [messages_to_dict(batch) for batch in messages]},
diff --git a/langchain/callbacks/tracers/schemas.py b/langchain/callbacks/tracers/schemas.py
index bc8abeae06a4c..74e4d66330b6f 100644
--- a/langchain/callbacks/tracers/schemas.py
+++ b/langchain/callbacks/tracers/schemas.py
@@ -123,7 +123,7 @@ class Run(RunBase):
     @root_validator(pre=True)
     def assign_name(cls, values: dict) -> dict:
         """Assign name to the run."""
-        if "name" not in values:
+        if values.get("name") is None:
             if "name" in values["serialized"]:
                 values["name"] = values["serialized"]["name"]
             elif "id" in values["serialized"]:

From 18f5c985d9013280a727f784576ea5c3e35f2122 Mon Sep 17 00:00:00 2001
From: ju-bezdek
Date: Mon, 12 Jun 2023 04:32:24 +0200
Subject: [PATCH 32/46] Langchain decorators (#6017)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Added a description of LangChain Decorators ✨ to the integrations section

#### Before submitting

#### Who can review?

Tag maintainers/contributors who might be interested:
@hwchase17
---
 docs/integrations/langchain_decorators.md | 368 ++++++++++++++++++++++
 1 file changed, 368 insertions(+)
 create mode 100644 docs/integrations/langchain_decorators.md

diff --git a/docs/integrations/langchain_decorators.md b/docs/integrations/langchain_decorators.md
new file mode 100644
index 0000000000000..0e4b631d5db75
--- /dev/null
+++ b/docs/integrations/langchain_decorators.md
@@ -0,0 +1,368 @@
+# LangChain Decorators ✨
+
+LangChain Decorators is a layer on top of LangChain that provides syntactic sugar 🍭 for writing custom LangChain prompts and chains
+
+For Feedback, Issues, Contributions - please raise an issue here:
+[ju-bezdek/langchain-decorators](https://github.com/ju-bezdek/langchain-decorators)
+
+
+
+Main principles and benefits:
+
+- a more `pythonic` way of writing code
+- write multiline prompts that won't break your code flow with indentation
+- making use of IDE built-in support for **hinting**, **type checking** and **popup with docs** to quickly peek into the function to see the prompt, the parameters it consumes, etc.
+- leverage all the power of the 🦜🔗 LangChain ecosystem
+- adding support for **optional parameters**
+- easily share parameters between the prompts by binding them to one class
+
+
+
+Here is a simple example of code written with **LangChain Decorators ✨**
+
+``` python
+
+@llm_prompt
+def write_me_short_post(topic:str, platform:str="twitter", audience:str = "developers")->str:
+    """
+    Write me a short header for my post about {topic} for {platform} platform.
+    It should be for {audience} audience.
+    (Max 15 words)
+    """
+    return
+
+# run it naturally
+write_me_short_post(topic="starwars")
+# or
+write_me_short_post(topic="starwars", platform="reddit")
+```
+
+# Quick start
+## Installation
+```bash
+pip install langchain_decorators
+```
+
+## Examples
+
+A good way to start is to review the examples here:
+ - [jupyter notebook](https://github.com/ju-bezdek/langchain-decorators/blob/main/example_notebook.ipynb)
+ - [colab notebook](https://colab.research.google.com/drive/1no-8WfeP6JaLD9yUtkPgym6x0G9ZYZOG#scrollTo=N4cf__D0E2Yk)
+
+# Defining other parameters
+Here we are just marking a function as a prompt with the `llm_prompt` decorator, effectively turning it into an LLMChain instead of something you run directly.
+
+
+A standard LLMChain takes many more init parameters than just input_variables and prompt... this implementation detail is hidden by the decorator.
+Here is how it works:
+
+1. Using **Global settings**:
+
+``` python
+# define global settings for all prompts (if not set - chatGPT is the current default)
+from langchain.chat_models import ChatOpenAI
+from langchain_decorators import GlobalSettings
+
+GlobalSettings.define_settings(
+    default_llm=ChatOpenAI(temperature=0.0),  # this is the default... you can change it here globally
+    default_streaming_llm=ChatOpenAI(temperature=0.0, streaming=True),  # this is the default... change it here for all prompts... it will be used for streaming
+)
+```
+
+2. Using predefined **prompt types**
+
+``` python
+# You can change the default prompt types
+from langchain_decorators import PromptTypes, PromptTypeSettings
+
+PromptTypes.AGENT_REASONING.llm = ChatOpenAI()
+
+# Or you can just define your own ones:
+class MyCustomPromptTypes(PromptTypes):
+    GPT4=PromptTypeSettings(llm=ChatOpenAI(model="gpt-4"))
+
+@llm_prompt(prompt_type=MyCustomPromptTypes.GPT4)
+def write_a_complicated_code(app_idea:str)->str:
+    ...
+
+```
+
+3. Define the settings **directly in the decorator**
+
+``` python
+from langchain.llms import OpenAI
+
+@llm_prompt(
+    llm=OpenAI(temperature=0.7),
+    stop_tokens=["\nObservation"],
+    ...
+    )
+def creative_writer(book_title:str)->str:
+    ...
+```
+
+## Passing a memory and/or callbacks:
+
+To pass any of these, just declare them in the function (or use kwargs to pass anything)
+
+```python
+
+@llm_prompt()
+async def write_me_short_post(topic:str, platform:str="twitter", memory:SimpleMemory = None):
+    """
+    {history_key}
+    Write me a short header for my post about {topic} for {platform} platform.
+    It should be for {audience} audience.
+    (Max 15 words)
+    """
+    pass
+
+await write_me_short_post(topic="old movies")
+
+```
+
+# Simplified streaming
+
+If we want to leverage streaming:
+ - we need to define the prompt as an async function
+ - turn on streaming on the decorator, or define a PromptType with streaming on
+ - capture the stream using StreamingContext
+
+This way we just mark which prompts should be streamed, without needing to tinker with which LLM to use or with creating and distributing a streaming handler into a particular part of our chain... just turn streaming on/off on the prompt/prompt type...
+
+The streaming will happen only if we call it in a streaming context... there we can define a simple function to handle the stream
+
+``` python
+# this code example is complete and should run as it is
+
+from langchain_decorators import StreamingContext, llm_prompt
+
+# this will mark the prompt for streaming (useful if we want to stream just some prompts in our app...
+# but don't want to distribute the callback handlers everywhere)
+# note that only async functions can be streamed (you will get an error if it's not)
+@llm_prompt(capture_stream=True)
+async def write_me_short_post(topic:str, platform:str="twitter", audience:str = "developers"):
+    """
+    Write me a short header for my post about {topic} for {platform} platform.
+    It should be for {audience} audience.
+    (Max 15 words)
+    """
+    pass
+
+
+
+# just an arbitrary function to demonstrate the streaming... in the real world this will be some websockets code
+tokens=[]
+def capture_stream_func(new_token:str):
+    tokens.append(new_token)
+
+# if we want to capture the stream, we need to wrap the execution into StreamingContext...
+# this will allow us to capture the stream even if the prompt call is hidden inside a higher level method
+# only the prompts marked with capture_stream will be captured here
+with StreamingContext(stream_to_stdout=True, callback=capture_stream_func):
+    result = await write_me_short_post(topic="old movies")
+    print("Stream finished ... we can distinguish tokens thanks to alternating colors")
+
+
+print("\nWe've captured",len(tokens),"tokens🎉\n")
+print("Here is the result:")
+print(result)
+```
+
+
+# Prompt declarations
+By default the prompt is the whole function docstring, unless you mark your prompt explicitly (see below)
+
+## Documenting your prompt
+
+We can specify what part of our docs is the prompt definition, by specifying a code block with the **<prompt>** language tag
+
+``` python
+@llm_prompt
+def write_me_short_post(topic:str, platform:str="twitter", audience:str = "developers"):
+    """
+    Here is a good way to write a prompt as part of a function docstring, with additional documentation for devs.
+
+    It needs to be a code block, marked as a `<prompt>` language
+    ``` <prompt>
+    Write me a short header for my post about {topic} for {platform} platform.
+    It should be for {audience} audience.
+    (Max 15 words)
+    ```
+
+    Now only the code block above will be used as a prompt, and the rest of the docstring will be used as a description for developers.
+    (It also has the nice benefit that the IDE (like VS Code) will display the prompt properly, not trying to parse it as markdown, which would not show new lines properly)
+    """
+    return
+```
+
+## Chat messages prompt
+
+For chat models it is very useful to define the prompt as a set of message templates... here is how to do it:
+
+``` python
+@llm_prompt
+def simulate_conversation(human_input:str, agent_role:str="a pirate"):
+    """
+    ## System message
+    - note the `:system` suffix inside the <prompt:_role_> tag
+
+
+    ``` <prompt:system>
+    You are a {agent_role} hacker. You must act like one.
+    You reply always in code, using python or javascript code block...
+    for example:
+
+    ... do not reply with anything else.. just with code - respecting your role.
+    ```
+
+    # human message
+    (we are using the real roles that are enforced by the LLM - GPT supports system, assistant, user)
+    ``` <prompt:user>
+    Hello, who are you
+    ```
+    a reply:
+
+
+    ``` <prompt:assistant>
+    \``` python <<- escaping the inner code block with \ that should be part of the prompt
+    def hello():
+        print("Argh... hello you pesky pirate")
+    \```
+    ```
+
+    we can also add some history using a placeholder
+    ``` <prompt:placeholder>
+    {history}
+    ```
+    ``` <prompt:user>
+    {human_input}
+    ```
+
+    Now only the code blocks above will be used as a prompt, and the rest of the docstring will be used as a description for developers.
+    (It also has the nice benefit that the IDE (like VS Code) will display the prompt properly, not trying to parse it as markdown, which would not show new lines properly)
+    """
+    pass
+
+```
+
+The roles here are the model's native roles (assistant, user, system for chatGPT)
+
+
+
+# Optional sections
+- you can define whole sections of your prompt that should be optional
+- if any input in the section is missing, the whole section won't be rendered
+
+the syntax for this is as follows:
+
+``` python
+@llm_prompt
+def prompt_with_optional_partials():
+    """
+    this text will be rendered always, but
+
+    {? anything inside this block will be rendered only if all the {value}s parameters are not empty (None | "") ?}
+
+    you can also place it in between the words
+    this too will be rendered{? , but
+    this block will be rendered only if {this_value} and {this_value}
+    is not empty?} !
+    """
+```
+
+
+# Output parsers
+
+- the llm_prompt decorator natively tries to detect the best output parser based on the output type (if not set, it returns the raw string)
+- list, dict and pydantic outputs are also supported natively (automatically)
+
+``` python
+# this code example is complete and should run as it is
+
+from langchain_decorators import llm_prompt
+
+@llm_prompt
+def write_name_suggestions(company_business:str, count:int)->list:
+    """ Write me {count} good name suggestions for a company that {company_business}
+    """
+    pass
+
+write_name_suggestions(company_business="sells cookies", count=5)
+```
+
+## More complex structures
+
+For dict / pydantic you need to specify the formatting instructions...
+this can be tedious, which is why you can let the output parser generate the instructions for you based on the (pydantic) model
+
+``` python
+from langchain_decorators import llm_prompt
+from pydantic import BaseModel, Field
+
+
+class TheOutputStructureWeExpect(BaseModel):
+    name:str = Field(description="The name of the company")
+    headline:str = Field(description="The description of the company (for landing page)")
+    employees:list[str] = Field(description="5-8 fake employee names with their positions")
+
+@llm_prompt()
+def fake_company_generator(company_business:str)->TheOutputStructureWeExpect:
+    """ Generate a fake company that {company_business}
+    {FORMAT_INSTRUCTIONS}
+    """
+    return
+
+company = fake_company_generator(company_business="sells cookies")
+
+# print the result nicely formatted
+print("Company name: ",company.name)
+print("company headline: ",company.headline)
+print("company employees: ",company.employees)
+
+```
+
+
+# Binding the prompt to an object
+
+``` python
+from pydantic import BaseModel
+from langchain_decorators import llm_prompt
+
+class AssistantPersonality(BaseModel):
+    assistant_name:str
+    assistant_role:str
+    field:str
+
+    @property
+    def a_property(self):
+        return "whatever"
+
+    def hello_world(self, function_kwarg:str=None):
+        """
+        We can reference any {field} or {a_property} inside our prompt... and combine it with {function_kwarg} in the method
+        """
+
+
+    @llm_prompt
+    def introduce_your_self(self)->str:
+        """
+        ``` <prompt:system>
+        You are an assistant named {assistant_name}.
+        Your role is to act as {assistant_role}
+        ```
+        ``` <prompt:user>
+        Introduce yourself (in less than 20 words)
+        ```
+        """
+
+
+
+personality = AssistantPersonality(assistant_name="John", assistant_role="a pirate")
+
+print(personality.introduce_your_self(personality))
+```
+
+
+# More examples:
+
+- these and a few more examples are also available in the [colab notebook here](https://colab.research.google.com/drive/1no-8WfeP6JaLD9yUtkPgym6x0G9ZYZOG#scrollTo=N4cf__D0E2Yk)
+- including the [ReAct Agent re-implementation](https://colab.research.google.com/drive/1no-8WfeP6JaLD9yUtkPgym6x0G9ZYZOG#scrollTo=3bID5fryE2Yp) using purely LangChain Decorators

From 2c9619bc1d6412f4846232eac502b2bbe5597384 Mon Sep 17 00:00:00 2001
From: Zander Chase <130414180+vowelparrot@users.noreply.github.com>
Date: Sun, 11 Jun 2023 19:34:26 -0700
Subject: [PATCH 33/46] Remove from PR template (#6018)

---
 .github/ISSUE_TEMPLATE/bug-report.yml | 2 +-
 .github/PULL_REQUEST_TEMPLATE.md      | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml
index 27ba3771bf663..702830214e3fa 100644
--- a/.github/ISSUE_TEMPLATE/bug-report.yml
+++ b/.github/ISSUE_TEMPLATE/bug-report.yml
@@ -46,7 +46,7 @@ body:
           - @agola11
 
           Tools / Toolkits
-          - @vowelparrot
+          -
 
           ...
 
        placeholder: "@Username ..."
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 50a840f491918..759032c2c62a1 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -48,7 +48,7 @@ Tag maintainers/contributors who might be interested:
 - @agola11
 
 Agents / Tools / Toolkits
-  - @vowelparrot
+  - @hwchase17
 
 VectorStores / Retrievers / Memory
 - @dev2049

From c5bce4a465912d6127e0cbe2c6e59f4ab8b9583b Mon Sep 17 00:00:00 2001
From: ljeagle
Date: Mon, 12 Jun 2023 10:35:03 +0800
Subject: [PATCH 34/46] add from_documents interface in awadb vector store
 (#6023)

Added a new interface, from_documents, in the awadb vector store.
@dev2049

---------

Co-authored-by: vincent
---
 langchain/vectorstores/awadb.py | 71 ++++++++++++++++++++++-----
 1 file changed, 61 insertions(+), 10 deletions(-)

diff --git a/langchain/vectorstores/awadb.py b/langchain/vectorstores/awadb.py
index 9c7d8a385bf8e..1de285955a3f9 100644
--- a/langchain/vectorstores/awadb.py
+++ b/langchain/vectorstores/awadb.py
@@ -49,8 +49,7 @@ def __init__(
         self.awadb_client = awadb.Client()
         self.awadb_client.Create(table_name)
 
-        if embedding_model is not None:
-            self.embedding_model = embedding_model
+        self.embedding_model = embedding_model
 
         self.added_doc_count = 0
 
@@ -121,6 +120,11 @@ def similarity_search(
         embedding = None
         if self.embedding_model is not None:
             embedding = self.embedding_model.embed_query(query)
+        else:
+            from awadb import llm_embedding
+
+            llm = llm_embedding.LLMEmbedding()
+            embedding = llm.Embedding(query)
 
         return self.similarity_search_by_vector(embedding, k)
 
@@ -141,13 +145,18 @@ def similarity_search_with_score(
         embedding = None
         if self.embedding_model is not None:
             embedding = self.embedding_model.embed_query(query)
+        else:
+            from awadb import llm_embedding
+
+            llm = llm_embedding.LLMEmbedding()
+            embedding = llm.Embedding(query)
+
+        # show_results = self.awadb_client.Search(embedding, k)
 
         results: List[Tuple[Document, float]] = []
 
-        if show_results.__len__() == 0:
-            return results
+        # if show_results.__len__() == 0:
+        #     return results
 
         scores: List[float] = []
         retrieval_docs = self.similarity_search_by_vector(embedding, k,
scores)
 
@@ -159,7 +168,7 @@ def similarity_search_with_score(
         L2_Norm = pow(L2_Norm, 0.5)
         doc_no = 0
         for doc in retrieval_docs:
-            doc_tuple = (doc, 1 - scores[doc_no] / L2_Norm)
+            doc_tuple = (doc, 1 - (scores[doc_no] / L2_Norm))
             results.append(doc_tuple)
             doc_no = doc_no + 1
 
@@ -208,7 +217,7 @@ def similarity_search_with_relevance_scores(
 
     def similarity_search_by_vector(
         self,
-        embedding: List[float],
+        embedding: Optional[List[float]] = None,
         k: int = DEFAULT_TOPN,
         scores: Optional[list] = None,
         **kwargs: Any,
@@ -226,10 +235,13 @@
         if self.awadb_client is None:
             raise ValueError("AwaDB client is None!!!")
 
-        show_results = self.awadb_client.Search(embedding, k)
-
         results: List[Document] = []
 
+        if embedding is None:
+            return results
+
+        show_results = self.awadb_client.Search(embedding, k)
+
         if show_results.__len__() == 0:
             return results
 
@@ -237,7 +249,11 @@
             content = ""
             meta_data = {}
             for item_key in item_detail:
-                if item_key == "Field@0":  # text for the document
+                if (
+                    item_key == "Field@0" and self.embedding_model is not None
+                ):  # text for the document
+                    content = item_detail[item_key]
+                elif self.embedding_model is None and item_key == "embedding_text":
                    content = item_detail[item_key]
                elif item_key == "Field@1":  # embedding field for the document
                    continue
@@ -282,3 +298,38 @@
         )
         awadb_client.add_texts(texts=texts, metadatas=metadatas)
         return awadb_client
+
+    @classmethod
+    def from_documents(
+        cls: Type[AwaDB],
+        documents: List[Document],
+        embedding: Optional[Embeddings] = None,
+        table_name: str = _DEFAULT_TABLE_NAME,
+        logging_and_data_dir: Optional[str] = None,
+        client: Optional[awadb.Client] = None,
+        **kwargs: Any,
+    ) -> AwaDB:
+        """Create an AwaDB vectorstore from a list of documents.
+
+        If a logging_and_data_dir is specified, the table will be persisted there.
+
+        Args:
+            documents (List[Document]): List of documents to add to the vectorstore.
+            embedding (Optional[Embeddings]): Embedding function. Defaults to None.
+            table_name (str): Name of the collection to create.
+            logging_and_data_dir (Optional[str]): Directory to persist the table.
+            client (Optional[awadb.Client]): AwaDB client.
+
+        Returns:
+            AwaDB: AwaDB vectorstore.
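+
+        Example (a minimal usage sketch; the file path and table name are
+        illustrative only, not required values):
+            .. code-block:: python
+
+                from langchain.document_loaders import TextLoader
+                from langchain.vectorstores.awadb import AwaDB
+
+                documents = TextLoader("state_of_the_union.txt").load()
+                store = AwaDB.from_documents(documents, table_name="langchain_awadb")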
+        """
+        texts = [doc.page_content for doc in documents]
+        metadatas = [doc.metadata for doc in documents]
+        return cls.from_texts(
+            texts=texts,
+            embedding=embedding,
+            metadatas=metadatas,
+            table_name=table_name,
+            logging_and_data_dir=logging_and_data_dir,
+            client=client,
+        )

From e05997c25e60934537b7dd67a3ae139480e80727 Mon Sep 17 00:00:00 2001
From: Harrison Chase
Date: Sun, 11 Jun 2023 20:56:51 -0700
Subject: [PATCH 35/46] Harrison/hologres (#6012)

Co-authored-by: Changgeng Zhao
Co-authored-by: Changgeng Zhao
---
 .../vectorstores/examples/hologres.ipynb      | 157 ++++++
 langchain/vectorstores/__init__.py            |   2 +
 langchain/vectorstores/hologres.py            | 506 ++++++++++++++++++
 .../vectorstores/test_hologres.py             | 142 +++++
 4 files changed, 807 insertions(+)
 create mode 100644 docs/modules/indexes/vectorstores/examples/hologres.ipynb
 create mode 100644 langchain/vectorstores/hologres.py
 create mode 100644 tests/integration_tests/vectorstores/test_hologres.py

diff --git a/docs/modules/indexes/vectorstores/examples/hologres.ipynb b/docs/modules/indexes/vectorstores/examples/hologres.ipynb
new file mode 100644
index 0000000000000..1d671cd6bded2
--- /dev/null
+++ b/docs/modules/indexes/vectorstores/examples/hologres.ipynb
@@ -0,0 +1,157 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Hologres\n",
+    "\n",
+    ">[Hologres](https://www.alibabacloud.com/help/en/hologres/latest/introduction) is a unified real-time data warehousing service developed by Alibaba Cloud. You can use Hologres to write, update, process, and analyze large amounts of data in real time. \n",
+    ">Hologres supports standard SQL syntax, is compatible with PostgreSQL, and supports most PostgreSQL functions. Hologres supports online analytical processing (OLAP) and ad hoc analysis for up to petabytes of data, and provides high-concurrency and low-latency online data services. \n",
+    "\n",
+    ">Hologres provides **vector database** functionality by adopting [Proxima](https://www.alibabacloud.com/help/en/hologres/latest/vector-processing).\n",
+    ">Proxima is a high-performance software library developed by Alibaba DAMO Academy. It allows you to search for the nearest neighbors of vectors. Proxima provides higher stability and performance than similar open source software such as Faiss. Proxima allows you to search for similar text or image embeddings with high throughput and low latency. Hologres is deeply integrated with Proxima to provide a high-performance vector search service.\n",
+    "\n",
+    "This notebook shows how to use functionality related to the `Hologres Proxima` vector database.\n",
+    "Click [here](https://www.alibabacloud.com/zh/product/hologres) to quickly deploy a Hologres cloud instance."
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain.vectorstores import Hologres" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Split documents and get embeddings by call OpenAI API" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.document_loaders import TextLoader\n", + "\n", + "loader = TextLoader(\"../../../state_of_the_union.txt\")\n", + "documents = loader.load()\n", + "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", + "docs = text_splitter.split_documents(documents)\n", + "\n", + "embeddings = OpenAIEmbeddings()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Connect to Hologres by setting related ENVIRONMENTS.\n", + "```\n", + "export PG_HOST={host}\n", + "export PG_PORT={port} # Optional, default is 80\n", + "export PG_DATABASE={db_name} # Optional, default is postgres\n", + "export PG_USER={username}\n", + "export PG_PASSWORD={password}\n", + "```\n", + "\n", + "Then store your embeddings and documents into Hologres" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "connection_string = Hologres.connection_string_from_db_params(\n", + " host=os.environ.get(\"PGHOST\", \"localhost\"),\n", + " port=int(os.environ.get(\"PGPORT\", \"80\")),\n", + " database=os.environ.get(\"PGDATABASE\", \"postgres\"),\n", + " user=os.environ.get(\"PGUSER\", \"postgres\"),\n", + " password=os.environ.get(\"PGPASSWORD\", \"postgres\"),\n", + ")\n", + "\n", + "vector_db = Hologres.from_documents(\n", + " docs,\n", + " embeddings,\n", + " connection_string=connection_string,\n", + " table_name=\"langchain_example_embeddings\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Query and retrieve data" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "query = \"What did the president say about Ketanji Brown Jackson\"\n", + "docs = vector_db.similarity_search(query)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", + "\n", + "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", + "\n", + "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. 
One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(docs[0].page_content)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.16"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/langchain/vectorstores/__init__.py b/langchain/vectorstores/__init__.py
index 3a932c23825c8..c59b06fd65a23 100644
--- a/langchain/vectorstores/__init__.py
+++ b/langchain/vectorstores/__init__.py
@@ -10,6 +10,7 @@
 from langchain.vectorstores.docarray import DocArrayHnswSearch, DocArrayInMemorySearch
 from langchain.vectorstores.elastic_vector_search import ElasticVectorSearch
 from langchain.vectorstores.faiss import FAISS
+from langchain.vectorstores.hologres import Hologres
 from langchain.vectorstores.lancedb import LanceDB
 from langchain.vectorstores.matching_engine import MatchingEngine
 from langchain.vectorstores.milvus import Milvus
@@ -57,6 +58,7 @@
     "DocArrayHnswSearch",
     "DocArrayInMemorySearch",
     "Typesense",
+    "Hologres",
     "Clickhouse",
     "ClickhouseSettings",
     "Tigris",
diff --git a/langchain/vectorstores/hologres.py b/langchain/vectorstores/hologres.py
new file mode 100644
index 0000000000000..b19dbbbb82f1f
--- /dev/null
+++ b/langchain/vectorstores/hologres.py
@@ -0,0 +1,506 @@
+"""VectorStore wrapper around a Hologres database."""
+from __future__ import annotations
+
+import json
+import logging
+import uuid
+from typing import Any, Dict, Iterable, List, Optional, Tuple, Type
+
+from langchain.docstore.document import Document
+from langchain.embeddings.base import Embeddings
+from langchain.utils import get_from_dict_or_env
+from langchain.vectorstores.base import VectorStore
+
+ADA_TOKEN_COUNT = 1536
+_LANGCHAIN_DEFAULT_TABLE_NAME = "langchain_pg_embedding"
+
+
+class HologresWrapper:
+    def __init__(self, connection_string: str, ndims: int, table_name: str) -> None:
+        import psycopg2
+
+        self.table_name = table_name
+        self.conn = psycopg2.connect(connection_string)
+        self.cursor = self.conn.cursor()
+        self.conn.autocommit = False
+        self.ndims = ndims
+
+    def create_vector_extension(self) -> None:
+        self.cursor.execute("create extension if not exists proxima")
+        self.conn.commit()
+
+    def create_table(self, drop_if_exist: bool = True) -> None:
+        if drop_if_exist:
+            self.cursor.execute(f"drop table if exists {self.table_name}")
+            self.conn.commit()
+
+        self.cursor.execute(
+            f"""create table if not exists {self.table_name} (
+id text,
+embedding float4[] check(array_ndims(embedding) = 1 and \
+array_length(embedding, 1) = {self.ndims}),
+metadata json,
+document text);"""
+        )
+        self.cursor.execute(
+            f"call set_table_property('{self.table_name}'"
+            + """, 'proxima_vectors',
+'{"embedding":{"algorithm":"Graph",
+"distance_method":"SquaredEuclidean",
+"build_params":{"min_flush_proxima_row_count" : 1,
+"min_compaction_proxima_row_count" : 1,
+"max_total_size_to_merge_mb" : 2000}}}');"""
+        )
+        self.conn.commit()
+
+    def get_by_id(self, id: str) -> List[Tuple]:
+        statement = (
+            f"select id, embedding, metadata, "
+            f"document from {self.table_name} where id = %s;"
+        )
+        self.cursor.execute(
+            statement,
+            (id,),
+        )
+        self.conn.commit()
+        return self.cursor.fetchall()
+
+    def insert(
self, + embedding: List[float], + metadata: dict, + document: str, + id: Optional[str] = None, + ) -> None: + self.cursor.execute( + f'insert into "{self.table_name}" ' + f"values (%s, array{json.dumps(embedding)}::float4[], %s, %s)", + (id if id is not None else "null", json.dumps(metadata), document), + ) + self.conn.commit() + + def query_nearest_neighbours( + self, embedding: List[float], k: int, filter: Optional[Dict[str, str]] = None + ) -> List[Tuple[str, str, float]]: + params = [] + filter_clause = "" + if filter is not None: + conjuncts = [] + for key, val in filter.items(): + conjuncts.append("metadata->>%s=%s") + params.append(key) + params.append(val) + filter_clause = "where " + " and ".join(conjuncts) + + sql = ( + f"select document, metadata::text, " + f"pm_approx_squared_euclidean_distance(array{json.dumps(embedding)}" + f"::float4[], embedding) as distance from" + f" {self.table_name} {filter_clause} order by distance asc limit {k};" + ) + self.cursor.execute(sql, tuple(params)) + self.conn.commit() + return self.cursor.fetchall() + + +class Hologres(VectorStore): + """ + VectorStore implementation using Hologres. + - `connection_string` is a hologres connection string. + - `embedding_function` any embedding function implementing + `langchain.embeddings.base.Embeddings` interface. + - `ndims` is the number of dimensions of the embedding output. + - `table_name` is the name of the table to store embeddings and data. + (default: langchain_pg_embedding) + - NOTE: The table will be created when initializing the store (if not exists) + So, make sure the user has the right permissions to create tables. + - `pre_delete_table` if True, will delete the table if it exists. + (default: False) + - Useful for testing. + """ + + def __init__( + self, + connection_string: str, + embedding_function: Embeddings, + ndims: int = ADA_TOKEN_COUNT, + table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, + pre_delete_table: bool = False, + logger: Optional[logging.Logger] = None, + ) -> None: + self.connection_string = connection_string + self.ndims = ndims + self.table_name = table_name + self.embedding_function = embedding_function + self.pre_delete_table = pre_delete_table + self.logger = logger or logging.getLogger(__name__) + self.__post_init__() + + def __post_init__( + self, + ) -> None: + """ + Initialize the store. 
+ """ + self.storage = HologresWrapper( + self.connection_string, self.ndims, self.table_name + ) + self.create_vector_extension() + self.create_table() + + def create_vector_extension(self) -> None: + try: + self.storage.create_vector_extension() + except Exception as e: + self.logger.exception(e) + raise e + + def create_table(self) -> None: + self.storage.create_table(self.pre_delete_table) + + @classmethod + def __from( + cls, + texts: List[str], + embeddings: List[List[float]], + embedding_function: Embeddings, + metadatas: Optional[List[dict]] = None, + ids: Optional[List[str]] = None, + ndims: int = ADA_TOKEN_COUNT, + table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME, + pre_delete_table: bool = False, + **kwargs: Any, + ) -> Hologres: + if ids is None: + ids = [str(uuid.uuid1()) for _ in texts] + + if not metadatas: + metadatas = [{} for _ in texts] + + connection_string = cls.get_connection_string(kwargs) + + store = cls( + connection_string=connection_string, + embedding_function=embedding_function, + ndims=ndims, + table_name=table_name, + pre_delete_table=pre_delete_table, + ) + + store.add_embeddings( + texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs + ) + + return store + + def add_embeddings( + self, + texts: Iterable[str], + embeddings: List[List[float]], + metadatas: List[dict], + ids: List[str], + **kwargs: Any, + ) -> None: + """Add embeddings to the vectorstore. + + Args: + texts: Iterable of strings to add to the vectorstore. + embeddings: List of list of embedding vectors. + metadatas: List of metadatas associated with the texts. + kwargs: vectorstore specific parameters + """ + try: + for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids): + self.storage.insert(embedding, metadata, text, id) + except Exception as e: + self.logger.exception(e) + self.storage.conn.commit() + + def add_texts( + self, + texts: Iterable[str], + metadatas: Optional[List[dict]] = None, + ids: Optional[List[str]] = None, + **kwargs: Any, + ) -> List[str]: + """Run more texts through the embeddings and add to the vectorstore. + + Args: + texts: Iterable of strings to add to the vectorstore. + metadatas: Optional list of metadatas associated with the texts. + kwargs: vectorstore specific parameters + + Returns: + List of ids from adding the texts into the vectorstore. + """ + if ids is None: + ids = [str(uuid.uuid1()) for _ in texts] + + embeddings = self.embedding_function.embed_documents(list(texts)) + + if not metadatas: + metadatas = [{} for _ in texts] + + self.add_embeddings(texts, embeddings, metadatas, ids, **kwargs) + + return ids + + def similarity_search( + self, + query: str, + k: int = 4, + filter: Optional[dict] = None, + **kwargs: Any, + ) -> List[Document]: + """Run similarity search with Hologres with distance. + + Args: + query (str): Query text to search for. + k (int): Number of results to return. Defaults to 4. + filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. + + Returns: + List of Documents most similar to the query. + """ + embedding = self.embedding_function.embed_query(text=query) + return self.similarity_search_by_vector( + embedding=embedding, + k=k, + filter=filter, + ) + + def similarity_search_by_vector( + self, + embedding: List[float], + k: int = 4, + filter: Optional[dict] = None, + **kwargs: Any, + ) -> List[Document]: + """Return docs most similar to embedding vector. + + Args: + embedding: Embedding to look up documents similar to. + k: Number of Documents to return. Defaults to 4. 
+            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
+
+        Returns:
+            List of Documents most similar to the query vector.
+        """
+        docs_and_scores = self.similarity_search_with_score_by_vector(
+            embedding=embedding, k=k, filter=filter
+        )
+        return [doc for doc, _ in docs_and_scores]
+
+    def similarity_search_with_score(
+        self,
+        query: str,
+        k: int = 4,
+        filter: Optional[dict] = None,
+    ) -> List[Tuple[Document, float]]:
+        """Return docs most similar to query.
+
+        Args:
+            query: Text to look up documents similar to.
+            k: Number of Documents to return. Defaults to 4.
+            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
+
+        Returns:
+            List of Documents most similar to the query and a score for each.
+        """
+        embedding = self.embedding_function.embed_query(query)
+        docs = self.similarity_search_with_score_by_vector(
+            embedding=embedding, k=k, filter=filter
+        )
+        return docs
+
+    def similarity_search_with_score_by_vector(
+        self,
+        embedding: List[float],
+        k: int = 4,
+        filter: Optional[dict] = None,
+    ) -> List[Tuple[Document, float]]:
+        results: List[Tuple[str, str, float]] = self.storage.query_nearest_neighbours(
+            embedding, k, filter
+        )
+
+        docs = [
+            (
+                Document(
+                    page_content=result[0],
+                    metadata=json.loads(result[1]),
+                ),
+                result[2],
+            )
+            for result in results
+        ]
+        return docs
+
+    @classmethod
+    def from_texts(
+        cls: Type[Hologres],
+        texts: List[str],
+        embedding: Embeddings,
+        metadatas: Optional[List[dict]] = None,
+        ndims: int = ADA_TOKEN_COUNT,
+        table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME,
+        ids: Optional[List[str]] = None,
+        pre_delete_table: bool = False,
+        **kwargs: Any,
+    ) -> Hologres:
+        """
+        Return VectorStore initialized from texts and embeddings.
+        A Postgres connection string is required:
+        either pass it as a parameter
+        or set the HOLOGRES_CONNECTION_STRING environment variable.
+        """
+        embeddings = embedding.embed_documents(list(texts))
+
+        return cls.__from(
+            texts,
+            embeddings,
+            embedding,
+            metadatas=metadatas,
+            ids=ids,
+            ndims=ndims,
+            table_name=table_name,
+            pre_delete_table=pre_delete_table,
+            **kwargs,
+        )
+
+    @classmethod
+    def from_embeddings(
+        cls,
+        text_embeddings: List[Tuple[str, List[float]]],
+        embedding: Embeddings,
+        metadatas: Optional[List[dict]] = None,
+        ndims: int = ADA_TOKEN_COUNT,
+        table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME,
+        ids: Optional[List[str]] = None,
+        pre_delete_table: bool = False,
+        **kwargs: Any,
+    ) -> Hologres:
+        """Construct Hologres wrapper from raw documents and
+        pre-generated embeddings.
+
+        Return VectorStore initialized from documents and embeddings.
+        A Postgres connection string is required:
+        either pass it as a parameter
+        or set the HOLOGRES_CONNECTION_STRING environment variable.
+
+        Example:
+            .. code-block:: python
+
+                from langchain import Hologres
+                from langchain.embeddings import OpenAIEmbeddings
+                embeddings = OpenAIEmbeddings()
+                text_embeddings = embeddings.embed_documents(texts)
+                text_embedding_pairs = list(zip(texts, text_embeddings))
+                hologres = Hologres.from_embeddings(text_embedding_pairs, embeddings)
+        """
+        texts = [t[0] for t in text_embeddings]
+        embeddings = [t[1] for t in text_embeddings]
+
+        return cls.__from(
+            texts,
+            embeddings,
+            embedding,
+            metadatas=metadatas,
+            ids=ids,
+            ndims=ndims,
+            table_name=table_name,
+            pre_delete_table=pre_delete_table,
+            **kwargs,
+        )
+
+    @classmethod
+    def from_existing_index(
+        cls: Type[Hologres],
+        embedding: Embeddings,
+        ndims: int = ADA_TOKEN_COUNT,
+        table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME,
+        pre_delete_table: bool = False,
+        **kwargs: Any,
+    ) -> Hologres:
+        """
+        Get an instance of an existing Hologres store. This method will
+        return the instance of the store without inserting any new
+        embeddings.
+        """
+
+        connection_string = cls.get_connection_string(kwargs)
+
+        store = cls(
+            connection_string=connection_string,
+            ndims=ndims,
+            table_name=table_name,
+            embedding_function=embedding,
+            pre_delete_table=pre_delete_table,
+        )
+
+        return store
+
+    @classmethod
+    def get_connection_string(cls, kwargs: Dict[str, Any]) -> str:
+        connection_string: str = get_from_dict_or_env(
+            data=kwargs,
+            key="connection_string",
+            env_key="HOLOGRES_CONNECTION_STRING",
+        )
+
+        if not connection_string:
+            raise ValueError(
+                "Postgres connection string is required. "
+                "Either pass it as a parameter "
+                "or set the HOLOGRES_CONNECTION_STRING environment variable."
+            )
+
+        return connection_string
+
+    @classmethod
+    def from_documents(
+        cls: Type[Hologres],
+        documents: List[Document],
+        embedding: Embeddings,
+        ndims: int = ADA_TOKEN_COUNT,
+        table_name: str = _LANGCHAIN_DEFAULT_TABLE_NAME,
+        ids: Optional[List[str]] = None,
+        pre_delete_table: bool = False,
+        **kwargs: Any,
+    ) -> Hologres:
+        """
+        Return VectorStore initialized from documents and embeddings.
+        A Postgres connection string is required:
+        either pass it as a parameter
+        or set the HOLOGRES_CONNECTION_STRING environment variable.
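+
+        Example (a minimal usage sketch; the connection parameters and table
+        name are placeholders, not real credentials):
+            .. code-block:: python
+
+                from langchain.embeddings import OpenAIEmbeddings
+                from langchain.vectorstores.hologres import Hologres
+
+                connection_string = Hologres.connection_string_from_db_params(
+                    host="localhost",
+                    port=80,
+                    database="postgres",
+                    user="postgres",
+                    password="postgres",
+                )
+                store = Hologres.from_documents(
+                    documents,
+                    OpenAIEmbeddings(),
+                    connection_string=connection_string,
+                    table_name="langchain_example_embeddings",
+                )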
+        """
+
+        texts = [d.page_content for d in documents]
+        metadatas = [d.metadata for d in documents]
+        connection_string = cls.get_connection_string(kwargs)
+
+        kwargs["connection_string"] = connection_string
+
+        return cls.from_texts(
+            texts=texts,
+            pre_delete_table=pre_delete_table,
+            embedding=embedding,
+            metadatas=metadatas,
+            ids=ids,
+            ndims=ndims,
+            table_name=table_name,
+            **kwargs,
+        )
+
+    @classmethod
+    def connection_string_from_db_params(
+        cls,
+        host: str,
+        port: int,
+        database: str,
+        user: str,
+        password: str,
+    ) -> str:
+        """Return connection string from database parameters."""
+        return (
+            f"dbname={database} user={user} password={password} host={host} port={port}"
+        )
diff --git a/tests/integration_tests/vectorstores/test_hologres.py b/tests/integration_tests/vectorstores/test_hologres.py
new file mode 100644
index 0000000000000..1e11575317905
--- /dev/null
+++ b/tests/integration_tests/vectorstores/test_hologres.py
@@ -0,0 +1,142 @@
+"""Test Hologres functionality."""
+import os
+from typing import List
+
+from langchain.docstore.document import Document
+from langchain.vectorstores.hologres import Hologres
+from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
+
+CONNECTION_STRING = Hologres.connection_string_from_db_params(
+    host=os.environ.get("TEST_HOLOGRES_HOST", "localhost"),
+    port=int(os.environ.get("TEST_HOLOGRES_PORT", "80")),
+    database=os.environ.get("TEST_HOLOGRES_DATABASE", "postgres"),
+    user=os.environ.get("TEST_HOLOGRES_USER", "postgres"),
+    password=os.environ.get("TEST_HOLOGRES_PASSWORD", "postgres"),
+)
+
+
+ADA_TOKEN_COUNT = 1536
+
+
+class FakeEmbeddingsWithAdaDimension(FakeEmbeddings):
+    """Fake embeddings functionality for testing."""
+
+    def embed_documents(self, texts: List[str]) -> List[List[float]]:
+        """Return simple embeddings."""
+        return [
+            [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(i)] for i in range(len(texts))
+        ]
+
+    def embed_query(self, text: str) -> List[float]:
+        """Return simple embeddings."""
+        return [float(1.0)] * (ADA_TOKEN_COUNT - 1) + [float(0.0)]
+
+
+def test_hologres() -> None:
+    """Test end to end construction and search."""
+    texts = ["foo", "bar", "baz"]
+    docsearch = Hologres.from_texts(
+        texts=texts,
+        table_name="test_table",
+        embedding=FakeEmbeddingsWithAdaDimension(),
+        connection_string=CONNECTION_STRING,
+        pre_delete_table=True,
+    )
+    output = docsearch.similarity_search("foo", k=1)
+    assert output == [Document(page_content="foo")]
+
+
+def test_hologres_embeddings() -> None:
+    """Test end to end construction with embeddings and search."""
+    texts = ["foo", "bar", "baz"]
+    text_embeddings = FakeEmbeddingsWithAdaDimension().embed_documents(texts)
+    text_embedding_pairs = list(zip(texts, text_embeddings))
+    docsearch = Hologres.from_embeddings(
+        text_embeddings=text_embedding_pairs,
+        table_name="test_table",
+        embedding=FakeEmbeddingsWithAdaDimension(),
+        connection_string=CONNECTION_STRING,
+        pre_delete_table=True,
+    )
+    output = docsearch.similarity_search("foo", k=1)
+    assert output == [Document(page_content="foo")]
+
+
+def test_hologres_with_metadatas() -> None:
+    """Test end to end construction and search."""
+    texts = ["foo", "bar", "baz"]
+    metadatas = [{"page": str(i)} for i in range(len(texts))]
+    docsearch = Hologres.from_texts(
+        texts=texts,
+        table_name="test_table",
+        embedding=FakeEmbeddingsWithAdaDimension(),
+        metadatas=metadatas,
+        connection_string=CONNECTION_STRING,
+        pre_delete_table=True,
+    )
+    output = docsearch.similarity_search("foo",
k=1) + assert output == [Document(page_content="foo", metadata={"page": "0"})] + + +def test_hologres_with_metadatas_with_scores() -> None: + """Test end to end construction and search.""" + texts = ["foo", "bar", "baz"] + metadatas = [{"page": str(i)} for i in range(len(texts))] + docsearch = Hologres.from_texts( + texts=texts, + table_name="test_table", + embedding=FakeEmbeddingsWithAdaDimension(), + metadatas=metadatas, + connection_string=CONNECTION_STRING, + pre_delete_table=True, + ) + output = docsearch.similarity_search_with_score("foo", k=1) + assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)] + + +def test_hologres_with_filter_match() -> None: + """Test end to end construction and search.""" + texts = ["foo", "bar", "baz"] + metadatas = [{"page": str(i)} for i in range(len(texts))] + docsearch = Hologres.from_texts( + texts=texts, + table_name="test_table_filter", + embedding=FakeEmbeddingsWithAdaDimension(), + metadatas=metadatas, + connection_string=CONNECTION_STRING, + pre_delete_table=True, + ) + output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "0"}) + assert output == [(Document(page_content="foo", metadata={"page": "0"}), 0.0)] + + +def test_hologres_with_filter_distant_match() -> None: + """Test end to end construction and search.""" + texts = ["foo", "bar", "baz"] + metadatas = [{"page": str(i)} for i in range(len(texts))] + docsearch = Hologres.from_texts( + texts=texts, + table_name="test_table_filter", + embedding=FakeEmbeddingsWithAdaDimension(), + metadatas=metadatas, + connection_string=CONNECTION_STRING, + pre_delete_table=True, + ) + output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "2"}) + assert output == [(Document(page_content="baz", metadata={"page": "2"}), 4.0)] + + +def test_hologres_with_filter_no_match() -> None: + """Test end to end construction and search.""" + texts = ["foo", "bar", "baz"] + metadatas = [{"page": str(i)} for i in range(len(texts))] + docsearch = Hologres.from_texts( + texts=texts, + table_name="test_table_filter", + embedding=FakeEmbeddingsWithAdaDimension(), + metadatas=metadatas, + connection_string=CONNECTION_STRING, + pre_delete_table=True, + ) + output = docsearch.similarity_search_with_score("foo", k=1, filter={"page": "5"}) + assert output == [] From 010d0bfeea4b3c3b46c4c7be59c4937a75c96e34 Mon Sep 17 00:00:00 2001 From: Ben Flast <55411495+bafMdb@users.noreply.github.com> Date: Sun, 11 Jun 2023 23:57:15 -0400 Subject: [PATCH 36/46] Update MongoDB Atlas support docs (#6022) Updating MongoDB Atlas support docs @hwchase17 let me know if you have any questions --- .../mongodb_atlas_vector_search.ipynb | 37 ++++++++++++++----- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/docs/modules/indexes/vectorstores/examples/mongodb_atlas_vector_search.ipynb b/docs/modules/indexes/vectorstores/examples/mongodb_atlas_vector_search.ipynb index 1d70ac2289aa4..54bf583212678 100644 --- a/docs/modules/indexes/vectorstores/examples/mongodb_atlas_vector_search.ipynb +++ b/docs/modules/indexes/vectorstores/examples/mongodb_atlas_vector_search.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "id": "683953b3", "metadata": {}, @@ -8,14 +9,14 @@ "#### Commented out until further notice\n", "MongoDB Atlas Vector Search\n", "\n", - ">[MongoDB Atlas](https://www.mongodb.com/docs/atlas/) is a document database managed in the cloud. 
It also enables Lucene and its vector search feature.\n",
+    ">[MongoDB Atlas](https://www.mongodb.com/docs/atlas/) is a fully-managed cloud database offered by the cloud service provider of your choice (AWS, Azure, and GCP). It now has support for native Vector Search on top of your MongoDB document data.\n",
     "\n",
-    "This notebook shows how to use the functionality related to the `MongoDB Atlas Vector Search` feature where you can store your embeddings in MongoDB documents and create a Lucene vector index to perform a KNN search.\n",
+    "This notebook shows how to use `MongoDB Atlas Vector Search` to store your embeddings in MongoDB documents, create a vector search index, and perform KNN search with an approximate nearest neighbor algorithm.\n",
     "\n",
-    "It uses the [knnBeta Operator](https://www.mongodb.com/docs/atlas/atlas-search/knn-beta) available in MongoDB Atlas Search. This feature is in early access and available only for evaluation purposes, to validate functionality, and to gather feedback from a small closed group of early access users. It is not recommended for production deployments as we may introduce breaking changes.\n",
+    "It uses the [knnBeta Operator](https://www.mongodb.com/docs/atlas/atlas-search/knn-beta) available in MongoDB Atlas Search. This feature is in Public Preview and available only for evaluation purposes, to validate functionality, and to gather feedback from public preview users. It is not recommended for production deployments as we may introduce breaking changes.\n",
     "\n",
-    "To use MongoDB Atlas, you must have first deployed a cluster. Free clusters are available. \n",
-    "Here is the MongoDB Atlas [quick start](https://www.mongodb.com/docs/atlas/getting-started/)."
+    "To use MongoDB Atlas, you must first have deployed a cluster. We have a Forever Free tier of clusters available. \n",
+    "To get started, head over to Atlas here: [quick start](https://www.mongodb.com/docs/atlas/getting-started/)."
    ]
   },
@@ -38,24 +39,39 @@
    "outputs": [],
    "source": [
     "import os\n",
+    "import getpass\n",
     "\n",
-    "MONGODB_ATLAS_URI = os.environ['MONGODB_ATLAS_URI']"
+    "os.environ['MONGODB_ATLAS_CLUSTER_URI'] = getpass.getpass('MongoDB Atlas Cluster URI:')\n",
+    "MONGODB_ATLAS_CLUSTER_URI = os.environ['MONGODB_ATLAS_CLUSTER_URI']"
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "457ace44-1d95-4001-9dd5-78811ab208ad",
    "metadata": {},
    "source": [
-    "We want to use `OpenAIEmbeddings` so we have to get the OpenAI API Key. Make sure the environment variable `OPENAI_API_KEY` is set up before proceeding."
+    "We want to use `OpenAIEmbeddings`, so we have to set up the OpenAI API key. "
    ]
   },
   {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "2d8f240d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "os.environ['OPENAI_API_KEY'] = getpass.getpass('OpenAI API Key:')\n",
+    "OPENAI_API_KEY = os.environ['OPENAI_API_KEY']"
+   ]
+  },
+  {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "1f3ecc42",
    "metadata": {},
    "source": [
-    "Now, let's create a Lucene vector index on your cluster. In the below example, `embedding` is the name of the field that contains the embedding vector. Please refer to the [documentation](https://www.mongodb.com/docs/atlas/atlas-search/define-field-mappings-for-vector-search) to get more details on how to define an Atlas Search index.\n",
+    "Now, let's create a vector index on your cluster. In the below example, `embedding` is the name of the field that contains the embedding vector.
Please refer to the [documentation](https://www.mongodb.com/docs/atlas/atlas-search/define-field-mappings-for-vector-search) to get more details on how to define an Atlas Vector Search index.\n",
     "You can name the index `langchain_demo` and create the index on the namespace `lanchain_db.langchain_col`. Finally, write the following definition in the JSON editor:\n",
     "\n",
     "```json\n",
@@ -115,7 +131,7 @@
     "from pymongo import MongoClient\n",
     "\n",
     "# initialize MongoDB python client\n",
-    "client = MongoClient(MONGODB_ATLAS_CONNECTION_STRING)\n",
+    "client = MongoClient(MONGODB_ATLAS_CLUSTER_URI)\n",
     "\n",
     "db_name = \"lanchain_db\"\n",
     "collection_name = \"langchain_col\"\n",
@@ -146,11 +162,12 @@
    ]
   },
   {
+   "attachments": {},
    "cell_type": "markdown",
    "id": "851a2ec9-9390-49a4-8412-3e132c9f789d",
    "metadata": {},
    "source": [
-    "You can reuse vector index you created before, make sure environment variable `OPENAI_API_KEY` is set up, then create another file."
+    "You can reuse the vector index you created before. Make sure the environment variable `OPENAI_API_KEY` is set, then create another file."
    ]
   },
   {

From bb7ac9edb58c05b420e6049eec6829741bfa40e8 Mon Sep 17 00:00:00 2001
From: wenmeng zhou
Date: Mon, 12 Jun 2023 12:14:20 +0800
Subject: [PATCH 37/46] add dashscope text embedding (#5929)

#### What I do
Adding an embedding API for
[DashScope](https://help.aliyun.com/product/610100.html), DAMO Academy's
multilingual unified text vector model built on an LLM base. It caters to
multiple mainstream languages worldwide and offers high-quality vector
services, helping developers quickly transform text data into high-quality
vector data. Currently supported languages include Chinese, English,
Spanish, French, Portuguese, Indonesian, and more.

#### Who can review?
Models
- @hwchase17
- @agola11

---------

Co-authored-by: Harrison Chase
---
 .../text_embedding/examples/dashscope.ipynb |  83 ++++++++++
 langchain/embeddings/__init__.py            |   2 +
 langchain/embeddings/dashscope.py           | 155 ++++++++++++++++++
 .../embeddings/test_dashscope.py            |  55 +++++++
 4 files changed, 295 insertions(+)
 create mode 100644 docs/modules/models/text_embedding/examples/dashscope.ipynb
 create mode 100644 langchain/embeddings/dashscope.py
 create mode 100644 tests/integration_tests/embeddings/test_dashscope.py

diff --git a/docs/modules/models/text_embedding/examples/dashscope.ipynb b/docs/modules/models/text_embedding/examples/dashscope.ipynb
new file mode 100644
index 0000000000000..7095ad5dc714d
--- /dev/null
+++ b/docs/modules/models/text_embedding/examples/dashscope.ipynb
@@ -0,0 +1,83 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# DashScope\n",
+    "\n",
+    "Let's load the DashScope Embedding class."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.embeddings import DashScopeEmbeddings" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "embeddings = DashScopeEmbeddings(model='text-embedding-v1', dashscope_api_key='your-dashscope-api-key')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "text = \"This is a test document.\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "query_result = embeddings.embed_query(text)\n", + "print(query_result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "doc_results = embeddings.embed_documents([\"foo\"])\n", + "print(doc_results)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "chatgpt", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.4" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/langchain/embeddings/__init__.py b/langchain/embeddings/__init__.py index a54ea9aa4709c..b68769c3986b1 100644 --- a/langchain/embeddings/__init__.py +++ b/langchain/embeddings/__init__.py @@ -8,6 +8,7 @@ ) from langchain.embeddings.bedrock import BedrockEmbeddings from langchain.embeddings.cohere import CohereEmbeddings +from langchain.embeddings.dashscope import DashScopeEmbeddings from langchain.embeddings.deepinfra import DeepInfraEmbeddings from langchain.embeddings.elasticsearch import ElasticsearchEmbeddings from langchain.embeddings.embaas import EmbaasEmbeddings @@ -61,6 +62,7 @@ "VertexAIEmbeddings", "BedrockEmbeddings", "DeepInfraEmbeddings", + "DashScopeEmbeddings", "EmbaasEmbeddings", ] diff --git a/langchain/embeddings/dashscope.py b/langchain/embeddings/dashscope.py new file mode 100644 index 0000000000000..1db6dd1d536ad --- /dev/null +++ b/langchain/embeddings/dashscope.py @@ -0,0 +1,155 @@ +"""Wrapper around DashScope embedding models.""" +from __future__ import annotations + +import logging +from typing import ( + Any, + Callable, + Dict, + List, + Optional, +) + +from pydantic import BaseModel, Extra, root_validator +from requests.exceptions import HTTPError +from tenacity import ( + before_sleep_log, + retry, + retry_if_exception_type, + stop_after_attempt, + wait_exponential, +) + +from langchain.embeddings.base import Embeddings +from langchain.utils import get_from_dict_or_env + +logger = logging.getLogger(__name__) + + +def _create_retry_decorator(embeddings: DashScopeEmbeddings) -> Callable[[Any], Any]: + multiplier = 1 + min_seconds = 1 + max_seconds = 4 + # Wait 2^x * 1 second between each retry starting with + # 1 seconds, then up to 4 seconds, then 4 seconds afterwards + return retry( + reraise=True, + stop=stop_after_attempt(embeddings.max_retries), + wait=wait_exponential(multiplier, min=min_seconds, max=max_seconds), + retry=(retry_if_exception_type(HTTPError)), + before_sleep=before_sleep_log(logger, logging.WARNING), + ) + + +def embed_with_retry(embeddings: DashScopeEmbeddings, **kwargs: Any) -> Any: + """Use tenacity to retry the embedding call.""" + retry_decorator = 
_create_retry_decorator(embeddings)
+
+    @retry_decorator
+    def _embed_with_retry(**kwargs: Any) -> Any:
+        resp = embeddings.client.call(**kwargs)
+        if resp.status_code == 200:
+            return resp.output["embeddings"]
+        elif resp.status_code in [400, 401]:
+            raise ValueError(
+                f"status_code: {resp.status_code} \n "
+                f"code: {resp.code} \n message: {resp.message}"
+            )
+        else:
+            raise HTTPError(
+                f"HTTP error occurred: status_code: {resp.status_code} \n "
+                f"code: {resp.code} \n message: {resp.message}"
+            )
+
+    return _embed_with_retry(**kwargs)
+
+
+class DashScopeEmbeddings(BaseModel, Embeddings):
+    """Wrapper around DashScope embedding models.
+
+    To use, you should have the ``dashscope`` python package installed, and the
+    environment variable ``DASHSCOPE_API_KEY`` set with your API key or pass it
+    as a named parameter to the constructor.
+
+    Example:
+        .. code-block:: python
+
+            from langchain.embeddings import DashScopeEmbeddings
+            embeddings = DashScopeEmbeddings(dashscope_api_key="my-api-key")
+
+    Example:
+        .. code-block:: python
+
+            import os
+            os.environ["DASHSCOPE_API_KEY"] = "your DashScope API KEY"
+
+            from langchain.embeddings.dashscope import DashScopeEmbeddings
+            embeddings = DashScopeEmbeddings(
+                model="text-embedding-v1",
+            )
+            text = "This is a test query."
+            query_result = embeddings.embed_query(text)
+
+    """
+
+    client: Any  #: :meta private:
+    model: str = "text-embedding-v1"
+    dashscope_api_key: Optional[str] = None
+    max_retries: int = 5
+    """Maximum number of retries to make when generating."""
+
+    class Config:
+        """Configuration for this pydantic object."""
+
+        extra = Extra.forbid
+
+    @root_validator()
+    def validate_environment(cls, values: Dict) -> Dict:
+        """Validate that the api key and python package exist in the environment."""
+        values["dashscope_api_key"] = get_from_dict_or_env(
+            values, "dashscope_api_key", "DASHSCOPE_API_KEY"
+        )
+        try:
+            import dashscope
+        except ImportError:
+            raise ImportError(
+                "Could not import dashscope python package. "
+                "Please install it with `pip install dashscope`."
+            )
+        dashscope.api_key = values["dashscope_api_key"]
+        values["client"] = dashscope.TextEmbedding
+        return values
+
+    def embed_documents(self, texts: List[str]) -> List[List[float]]:
+        """Call out to DashScope's embedding endpoint for embedding search docs.
+
+        Args:
+            texts: The list of texts to embed.
+
+        Returns:
+            List of embeddings, one for each text.
+        """
+        embeddings = embed_with_retry(
+            self, input=texts, text_type="document", model=self.model
+        )
+        embedding_list = [item["embedding"] for item in embeddings]
+        return embedding_list
+
+    def embed_query(self, text: str) -> List[float]:
+        """Call out to DashScope's embedding endpoint for embedding query text.
+
+        Args:
+            text: The text to embed.
+
+        Returns:
+            Embedding for the text.
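+
+        Example (mirroring the class-level usage above; the query text is
+        arbitrary):
+            .. code-block:: python
+
+                embeddings = DashScopeEmbeddings(model="text-embedding-v1")
+                vector = embeddings.embed_query("This is a test query.")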
+ """ + embedding = embed_with_retry( + self, input=text, text_type="query", model=self.model + )[0]["embedding"] + return embedding diff --git a/tests/integration_tests/embeddings/test_dashscope.py b/tests/integration_tests/embeddings/test_dashscope.py new file mode 100644 index 0000000000000..f61c3805e1a99 --- /dev/null +++ b/tests/integration_tests/embeddings/test_dashscope.py @@ -0,0 +1,55 @@ +"""Test dashscope embeddings.""" +import numpy as np + +from langchain.embeddings.dashscope import DashScopeEmbeddings + + +def test_dashscope_embedding_documents() -> None: + """Test dashscope embeddings.""" + documents = ["foo bar"] + embedding = DashScopeEmbeddings(model="text-embedding-v1") + output = embedding.embed_documents(documents) + assert len(output) == 1 + assert len(output[0]) == 1536 + + +def test_dashscope_embedding_documents_multiple() -> None: + """Test dashscope embeddings.""" + documents = ["foo bar", "bar foo", "foo"] + embedding = DashScopeEmbeddings(model="text-embedding-v1") + output = embedding.embed_documents(documents) + assert len(output) == 3 + assert len(output[0]) == 1536 + assert len(output[1]) == 1536 + assert len(output[2]) == 1536 + + +def test_dashscope_embedding_query() -> None: + """Test dashscope embeddings.""" + document = "foo bar" + embedding = DashScopeEmbeddings(model="text-embedding-v1") + output = embedding.embed_query(document) + assert len(output) == 1536 + + +def test_dashscope_embedding_with_empty_string() -> None: + """Test dashscope embeddings with empty string.""" + import dashscope + + document = ["", "abc"] + embedding = DashScopeEmbeddings(model="text-embedding-v1") + output = embedding.embed_documents(document) + assert len(output) == 2 + assert len(output[0]) == 1536 + expected_output = dashscope.TextEmbedding.call( + input="", model="text-embedding-v1", text_type="document" + ).output["embeddings"][0]["embedding"] + assert np.allclose(output[0], expected_output) + assert len(output[1]) == 1536 + + +if __name__ == "__main__": + test_dashscope_embedding_documents() + test_dashscope_embedding_documents_multiple() + test_dashscope_embedding_query() + test_dashscope_embedding_with_empty_string() From d1561b74ebe370fa6d6ad1d23c2785874481b1bd Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Sun, 11 Jun 2023 21:15:42 -0700 Subject: [PATCH 38/46] Harrison/cognitive search (#6011) Co-authored-by: Fabrizio Ruocco --- .../vectorstores/examples/azuresearch.ipynb | 245 +++++++++ langchain/vectorstores/__init__.py | 2 + langchain/vectorstores/azuresearch.py | 507 ++++++++++++++++++ poetry.lock | 26 +- pyproject.toml | 18 +- .../vectorstores/test_azuresearch.py | 93 ++++ 6 files changed, 887 insertions(+), 4 deletions(-) create mode 100644 docs/modules/indexes/vectorstores/examples/azuresearch.ipynb create mode 100644 langchain/vectorstores/azuresearch.py create mode 100644 tests/integration_tests/vectorstores/test_azuresearch.py diff --git a/docs/modules/indexes/vectorstores/examples/azuresearch.ipynb b/docs/modules/indexes/vectorstores/examples/azuresearch.ipynb new file mode 100644 index 0000000000000..36a6abb6986df --- /dev/null +++ b/docs/modules/indexes/vectorstores/examples/azuresearch.ipynb @@ -0,0 +1,245 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Azure Cognitive Search" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Install Azure Cognitive Search SDK" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": 
+   "outputs": [],
+   "source": [
+    "!pip install --index-url=https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/ azure-search-documents==11.4.0a20230509004\n",
+    "!pip install azure-identity"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Import required libraries"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os, json\n",
+    "import openai\n",
+    "from dotenv import load_dotenv\n",
+    "from langchain.embeddings.openai import OpenAIEmbeddings\n",
+    "from langchain.schema import BaseRetriever\n",
+    "from langchain.vectorstores.azuresearch import AzureSearch"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Configure OpenAI settings\n",
+    "Configure the OpenAI settings to use Azure OpenAI or OpenAI"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Load environment variables from a .env file using load_dotenv():\n",
+    "load_dotenv()\n",
+    "\n",
+    "openai.api_type = \"azure\"\n",
+    "openai.api_base = \"YOUR_OPENAI_ENDPOINT\"\n",
+    "openai.api_version = \"2023-05-15\"\n",
+    "openai.api_key = \"YOUR_OPENAI_API_KEY\"\n",
+    "model: str = \"text-embedding-ada-002\""
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Configure vector store settings\n",
+    " \n",
+    "Set up the vector store settings, replacing the placeholder values with your Azure Cognitive Search endpoint and admin key:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "vector_store_address: str = 'YOUR_AZURE_SEARCH_ENDPOINT'\n",
+    "vector_store_password: str = 'YOUR_AZURE_SEARCH_ADMIN_KEY'\n",
+    "index_name: str = \"langchain-vector-demo\""
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Create embeddings and vector store instances\n",
+    " \n",
+    "Create instances of the OpenAIEmbeddings and AzureSearch classes:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=model, chunk_size=1) \n",
+    "vector_store: AzureSearch = AzureSearch(azure_search_endpoint=vector_store_address, \n",
+    "                                        azure_search_key=vector_store_password, \n",
+    "                                        index_name=index_name, \n",
+    "                                        embedding_function=embeddings.embed_query) \n"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Insert text and embeddings into vector store\n",
+    " \n",
+    "Load a document, split it into chunks, and add the resulting texts and metadata to the vector store:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.document_loaders import TextLoader\n",
+    "from langchain.text_splitter import CharacterTextSplitter\n",
+    "loader = TextLoader('../../../state_of_the_union.txt', encoding='utf-8')\n",
+    "\n",
+    "documents = loader.load()\n",
+    "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n",
+    "docs = text_splitter.split_documents(documents)\n",
+    "\n",
+    "vector_store.add_documents(documents=docs)"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Perform a vector similarity search\n",
+    " \n",
+    "Execute a pure vector similarity search using the similarity_search() method:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
"metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", + "\n", + "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", + "\n", + "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n" + ] + } + ], + "source": [ + "# Perform a similarity search\n", + "docs = vector_store.similarity_search(query=\"What did the president say about Ketanji Brown Jackson\", k=3, search_type='similarity')\n", + "print(docs[0].page_content)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Perform a Hybrid Search\n", + "\n", + "Execute hybrid search using the hybrid_search() method:" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Tonight. I call on the Senate to: Pass the Freedom to Vote Act. Pass the John Lewis Voting Rights Act. And while you’re at it, pass the Disclose Act so Americans can know who is funding our elections. \n", + "\n", + "Tonight, I’d like to honor someone who has dedicated his life to serve this country: Justice Stephen Breyer—an Army veteran, Constitutional scholar, and retiring Justice of the United States Supreme Court. Justice Breyer, thank you for your service. \n", + "\n", + "One of the most serious constitutional responsibilities a President has is nominating someone to serve on the United States Supreme Court. \n", + "\n", + "And I did that 4 days ago, when I nominated Circuit Court of Appeals Judge Ketanji Brown Jackson. 
One of our nation’s top legal minds, who will continue Justice Breyer’s legacy of excellence.\n" + ] + } + ], + "source": [ + "# Perform a hybrid search \n", + "docs = vector_store.similarity_search(query=\"What did the president say about Ketanji Brown Jackson\", k=3)\n", + "print(docs[0].page_content)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.9.13 ('.venv': venv)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.3" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "645053d6307d413a1a75681b5ebb6449bb2babba4bcb0bf65a1ddc3dbefb108a" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/langchain/vectorstores/__init__.py b/langchain/vectorstores/__init__.py index c59b06fd65a23..1bb3f901a2673 100644 --- a/langchain/vectorstores/__init__.py +++ b/langchain/vectorstores/__init__.py @@ -3,6 +3,7 @@ from langchain.vectorstores.annoy import Annoy from langchain.vectorstores.atlas import AtlasDB from langchain.vectorstores.awadb import AwaDB +from langchain.vectorstores.azuresearch import AzureSearch from langchain.vectorstores.base import VectorStore from langchain.vectorstores.chroma import Chroma from langchain.vectorstores.clickhouse import Clickhouse, ClickhouseSettings @@ -31,6 +32,7 @@ from langchain.vectorstores.zilliz import Zilliz __all__ = [ + "AzureSearch", "Redis", "ElasticVectorSearch", "FAISS", diff --git a/langchain/vectorstores/azuresearch.py b/langchain/vectorstores/azuresearch.py new file mode 100644 index 0000000000000..94ac1172a66cd --- /dev/null +++ b/langchain/vectorstores/azuresearch.py @@ -0,0 +1,507 @@ +"""Wrapper around Azure Cognitive Search.""" +from __future__ import annotations + +import base64 +import json +import logging +import uuid +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Iterable, + List, + Optional, + Tuple, + Type, +) + +import numpy as np +from pydantic import BaseModel, root_validator + +from langchain.docstore.document import Document +from langchain.embeddings.base import Embeddings +from langchain.schema import BaseRetriever +from langchain.utils import get_from_env +from langchain.vectorstores.base import VectorStore + +logger = logging.getLogger() + +if TYPE_CHECKING: + from azure.search.documents import SearchClient + + +# Allow overriding field names for Azure Search +FIELDS_ID = get_from_env( + key="AZURESEARCH_FIELDS_ID", env_key="AZURESEARCH_FIELDS_ID", default="id" +) +FIELDS_CONTENT = get_from_env( + key="AZURESEARCH_FIELDS_CONTENT", + env_key="AZURESEARCH_FIELDS_CONTENT", + default="content", +) +FIELDS_CONTENT_VECTOR = get_from_env( + key="AZURESEARCH_FIELDS_CONTENT_VECTOR", + env_key="AZURESEARCH_FIELDS_CONTENT_VECTOR", + default="content_vector", +) +FIELDS_METADATA = get_from_env( + key="AZURESEARCH_FIELDS_TAG", env_key="AZURESEARCH_FIELDS_TAG", default="metadata" +) + +MAX_UPLOAD_BATCH_SIZE = 1000 + + +def _get_search_client( + endpoint: str, + key: str, + index_name: str, + embedding_function: Callable, + semantic_configuration_name: Optional[str] = None, +) -> SearchClient: + from azure.core.credentials import AzureKeyCredential + from azure.core.exceptions import ResourceNotFoundError + from azure.identity import DefaultAzureCredential + from azure.search.documents import SearchClient + from 
azure.search.documents.indexes import SearchIndexClient + from azure.search.documents.indexes.models import ( + PrioritizedFields, + SearchableField, + SearchField, + SearchFieldDataType, + SearchIndex, + SemanticConfiguration, + SemanticField, + SemanticSettings, + SimpleField, + VectorSearch, + VectorSearchAlgorithmConfiguration, + ) + + if key is None: + credential = DefaultAzureCredential() + else: + credential = AzureKeyCredential(key) + index_client: SearchIndexClient = SearchIndexClient( + endpoint=endpoint, credential=credential + ) + try: + index_client.get_index(name=index_name) + except ResourceNotFoundError: + # Fields configuration + fields = [ + SimpleField( + name=FIELDS_ID, + type=SearchFieldDataType.String, + key=True, + filterable=True, + ), + SearchableField( + name=FIELDS_CONTENT, + type=SearchFieldDataType.String, + searchable=True, + retrievable=True, + ), + SearchField( + name=FIELDS_CONTENT_VECTOR, + type=SearchFieldDataType.Collection(SearchFieldDataType.Single), + searchable=True, + dimensions=len(embedding_function("Text")), + vector_search_configuration="default", + ), + SearchableField( + name=FIELDS_METADATA, + type=SearchFieldDataType.String, + searchable=True, + retrievable=True, + ), + ] + # Vector search configuration + vector_search = VectorSearch( + algorithm_configurations=[ + VectorSearchAlgorithmConfiguration( + name="default", + kind="hnsw", + hnsw_parameters={ + "m": 4, + "efConstruction": 400, + "efSearch": 500, + "metric": "cosine", + }, + ) + ] + ) + # Create the semantic settings with the configuration + semantic_settings = ( + None + if semantic_configuration_name is None + else SemanticSettings( + configurations=[ + SemanticConfiguration( + name=semantic_configuration_name, + prioritized_fields=PrioritizedFields( + prioritized_content_fields=[ + SemanticField(field_name=FIELDS_CONTENT) + ], + ), + ) + ] + ) + ) + # Create the search index with the semantic settings and vector search + index = SearchIndex( + name=index_name, + fields=fields, + vector_search=vector_search, + semantic_settings=semantic_settings, + ) + index_client.create_index(index) + # Create the search client + return SearchClient(endpoint=endpoint, index_name=index_name, credential=credential) + + +class AzureSearch(VectorStore): + def __init__( + self, + azure_search_endpoint: str, + azure_search_key: str, + index_name: str, + embedding_function: Callable, + search_type: str = "hybrid", + semantic_configuration_name: Optional[str] = None, + semantic_query_language: str = "en-us", + **kwargs: Any, + ): + """Initialize with necessary components.""" + # Initialize base class + self.embedding_function = embedding_function + self.client = _get_search_client( + azure_search_endpoint, + azure_search_key, + index_name, + embedding_function, + semantic_configuration_name, + ) + self.search_type = search_type + self.semantic_configuration_name = semantic_configuration_name + self.semantic_query_language = semantic_query_language + + def add_texts( + self, + texts: Iterable[str], + metadatas: Optional[List[dict]] = None, + **kwargs: Any, + ) -> List[str]: + """Add texts data to an existing index.""" + keys = kwargs.get("keys") + ids = [] + # Write data to index + data = [] + for i, text in enumerate(texts): + # Use provided key otherwise use default key + key = keys[i] if keys else str(uuid.uuid4()) + # Encoding key for Azure Search valid characters + key = base64.urlsafe_b64encode(bytes(key, "utf-8")).decode("ascii") + metadata = metadatas[i] if metadatas else {} + # Add data to index 
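+            # (each record carries the upload action, the base64url-encoded key,
+            # the raw text, its embedding vector, and the metadata serialized to JSON)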
+ data.append( + { + "@search.action": "upload", + FIELDS_ID: key, + FIELDS_CONTENT: text, + FIELDS_CONTENT_VECTOR: np.array( + self.embedding_function(text), dtype=np.float32 + ).tolist(), + FIELDS_METADATA: json.dumps(metadata), + } + ) + ids.append(key) + # Upload data in batches + if len(data) == MAX_UPLOAD_BATCH_SIZE: + response = self.client.upload_documents(documents=data) + # Check if all documents were successfully uploaded + if not all([r.succeeded for r in response]): + raise Exception(response) + # Reset data + data = [] + + # Considering case where data is an exact multiple of batch-size entries + if len(data) == 0: + return ids + + # Upload data to index + response = self.client.upload_documents(documents=data) + # Check if all documents were successfully uploaded + if all([r.succeeded for r in response]): + return ids + else: + raise Exception(response) + + def similarity_search( + self, query: str, k: int = 4, **kwargs: Any + ) -> List[Document]: + search_type = kwargs.get("search_type", self.search_type) + if search_type == "similarity": + docs = self.vector_search(query, k=k) + elif search_type == "hybrid": + docs = self.hybrid_search(query, k=k) + elif search_type == "semantic_hybrid": + docs = self.semantic_hybrid_search(query, k=k) + else: + raise ValueError(f"search_type of {search_type} not allowed.") + return docs + + def vector_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]: + """ + Returns the most similar indexed documents to the query text. + + Args: + query (str): The query text for which to find similar documents. + k (int): The number of documents to return. Default is 4. + + Returns: + List[Document]: A list of documents that are most similar to the query text. + """ + docs_and_scores = self.vector_search_with_score( + query, k=k, filters=kwargs.get("filters", None) + ) + return [doc for doc, _ in docs_and_scores] + + def vector_search_with_score( + self, query: str, k: int = 4, filters: Optional[str] = None + ) -> List[Tuple[Document, float]]: + """Return docs most similar to query. + + Args: + query: Text to look up documents similar to. + k: Number of Documents to return. Defaults to 4. + + Returns: + List of Documents most similar to the query and score for each + """ + from azure.search.documents.models import Vector + + results = self.client.search( + search_text="", + vector=Vector( + value=np.array( + self.embedding_function(query), dtype=np.float32 + ).tolist(), + k=k, + fields=FIELDS_CONTENT_VECTOR, + ), + select=[f"{FIELDS_ID},{FIELDS_CONTENT},{FIELDS_METADATA}"], + filter=filters, + ) + # Convert results to Document objects + docs = [ + ( + Document( + page_content=result[FIELDS_CONTENT], + metadata=json.loads(result[FIELDS_METADATA]), + ), + float(result["@search.score"]), + ) + for result in results + ] + return docs + + def hybrid_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]: + """ + Returns the most similar indexed documents to the query text. + + Args: + query (str): The query text for which to find similar documents. + k (int): The number of documents to return. Default is 4. + + Returns: + List[Document]: A list of documents that are most similar to the query text. 
+ """ + docs_and_scores = self.hybrid_search_with_score( + query, k=k, filters=kwargs.get("filters", None) + ) + return [doc for doc, _ in docs_and_scores] + + def hybrid_search_with_score( + self, query: str, k: int = 4, filters: Optional[str] = None + ) -> List[Tuple[Document, float]]: + """Return docs most similar to query with an hybrid query. + + Args: + query: Text to look up documents similar to. + k: Number of Documents to return. Defaults to 4. + + Returns: + List of Documents most similar to the query and score for each + """ + from azure.search.documents.models import Vector + + results = self.client.search( + search_text=query, + vector=Vector( + value=np.array( + self.embedding_function(query), dtype=np.float32 + ).tolist(), + k=k, + fields=FIELDS_CONTENT_VECTOR, + ), + select=[f"{FIELDS_ID},{FIELDS_CONTENT},{FIELDS_METADATA}"], + filter=filters, + top=k, + ) + # Convert results to Document objects + docs = [ + ( + Document( + page_content=result[FIELDS_CONTENT], + metadata=json.loads(result[FIELDS_METADATA]), + ), + float(result["@search.score"]), + ) + for result in results + ] + return docs + + def semantic_hybrid_search( + self, query: str, k: int = 4, **kwargs: Any + ) -> List[Document]: + """ + Returns the most similar indexed documents to the query text. + + Args: + query (str): The query text for which to find similar documents. + k (int): The number of documents to return. Default is 4. + + Returns: + List[Document]: A list of documents that are most similar to the query text. + """ + docs_and_scores = self.semantic_hybrid_search_with_score( + query, k=k, filters=kwargs.get("filters", None) + ) + return [doc for doc, _ in docs_and_scores] + + def semantic_hybrid_search_with_score( + self, query: str, k: int = 4, filters: Optional[str] = None + ) -> List[Tuple[Document, float]]: + """Return docs most similar to query with an hybrid query. + + Args: + query: Text to look up documents similar to. + k: Number of Documents to return. Defaults to 4. 
+ + Returns: + List of Documents most similar to the query and score for each + """ + from azure.search.documents.models import Vector + + results = self.client.search( + search_text=query, + vector=Vector( + value=np.array( + self.embedding_function(query), dtype=np.float32 + ).tolist(), + k=50, # Hardcoded value to maximize L2 retrieval + fields=FIELDS_CONTENT_VECTOR, + ), + select=[f"{FIELDS_ID},{FIELDS_CONTENT},{FIELDS_METADATA}"], + filter=filters, + query_type="semantic", + query_language=self.semantic_query_language, + semantic_configuration_name=self.semantic_configuration_name, + query_caption="extractive", + query_answer="extractive", + top=k, + ) + # Get Semantic Answers + semantic_answers = results.get_answers() + semantic_answers_dict = {} + for semantic_answer in semantic_answers: + semantic_answers_dict[semantic_answer.key] = { + "text": semantic_answer.text, + "highlights": semantic_answer.highlights, + } + # Convert results to Document objects + docs = [ + ( + Document( + page_content=result["content"], + metadata={ + **json.loads(result["metadata"]), + **{ + "captions": { + "text": result.get("@search.captions", [{}])[0].text, + "highlights": result.get("@search.captions", [{}])[ + 0 + ].highlights, + } + if result.get("@search.captions") + else {}, + "answers": semantic_answers_dict.get( + json.loads(result["metadata"]).get("key"), "" + ), + }, + }, + ), + float(result["@search.score"]), + ) + for result in results + ] + return docs + + @classmethod + def from_texts( + cls: Type[AzureSearch], + texts: List[str], + embedding: Embeddings, + metadatas: Optional[List[dict]] = None, + azure_search_endpoint: str = "", + azure_search_key: str = "", + index_name: str = "langchain-index", + **kwargs: Any, + ) -> AzureSearch: + # Creating a new Azure Search instance + azure_search = cls( + azure_search_endpoint, + azure_search_key, + index_name, + embedding.embed_query, + ) + azure_search.add_texts(texts, metadatas, **kwargs) + return azure_search + + +class AzureSearchVectorStoreRetriever(BaseRetriever, BaseModel): + vectorstore: AzureSearch + search_type: str = "hybrid" + k: int = 4 + + class Config: + """Configuration for this pydantic object.""" + + arbitrary_types_allowed = True + + @root_validator() + def validate_search_type(cls, values: Dict) -> Dict: + """Validate search type.""" + if "search_type" in values: + search_type = values["search_type"] + if search_type not in ("similarity", "hybrid", "semantic_hybrid"): + raise ValueError(f"search_type of {search_type} not allowed.") + return values + + def get_relevant_documents(self, query: str) -> List[Document]: + if self.search_type == "similarity": + docs = self.vectorstore.vector_search(query, k=self.k) + elif self.search_type == "hybrid": + docs = self.vectorstore.hybrid_search(query, k=self.k) + elif self.search_type == "semantic_hybrid": + docs = self.vectorstore.semantic_hybrid_search(query, k=self.k) + else: + raise ValueError(f"search_type of {self.search_type} not allowed.") + return docs + + async def aget_relevant_documents(self, query: str) -> List[Document]: + raise NotImplementedError( + "AzureSearchVectorStoreRetriever does not support async" + ) diff --git a/poetry.lock b/poetry.lock index 9ed0db756c9e3..6d5bb1e0c2ba2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -702,6 +702,28 @@ msal = ">=1.20.0,<2.0.0" msal-extensions = ">=0.3.0,<2.0.0" six = ">=1.12.0" +[[package]] +name = "azure-search-documents" +version = "11.4.0a20230509004" +description = "Microsoft Azure Cognitive Search Client Library for Python" 
+category = "main" +optional = true +python-versions = ">=3.7" +files = [ + {file = "azure-search-documents-11.4.0a20230509004.zip", hash = "sha256:6cca144573161a10aa0fcd13927264453e79c63be6a53cf2ec241c9c8c22f6b5"}, + {file = "azure_search_documents-11.4.0a20230509004-py3-none-any.whl", hash = "sha256:6215e9a4f9e935ff3eac1b7d5519c6c0789b4497eb11242d376911aaefbb0359"}, +] + +[package.dependencies] +azure-common = ">=1.1,<2.0" +azure-core = ">=1.24.0,<2.0.0" +isodate = ">=0.6.0" + +[package.source] +type = "legacy" +url = "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple" +reference = "azure-sdk-dev" + [[package]] name = "babel" version = "2.12.1" @@ -11451,7 +11473,7 @@ cffi = ["cffi (>=1.11)"] [extras] all = ["anthropic", "cohere", "openai", "nlpcloud", "huggingface_hub", "jina", "manifest-ml", "elasticsearch", "opensearch-py", "google-search-results", "faiss-cpu", "sentence-transformers", "transformers", "spacy", "nltk", "wikipedia", "beautifulsoup4", "tiktoken", "torch", "jinja2", "pinecone-client", "pinecone-text", "pymongo", "weaviate-client", "redis", "google-api-python-client", "google-auth", "wolframalpha", "qdrant-client", "tensorflow-text", "pypdf", "networkx", "nomic", "aleph-alpha-client", "deeplake", "pgvector", "psycopg2-binary", "pyowm", "pytesseract", "html2text", "atlassian-python-api", "gptcache", "duckduckgo-search", "arxiv", "azure-identity", "clickhouse-connect", "azure-cosmos", "lancedb", "langkit", "lark", "pexpect", "pyvespa", "O365", "jq", "docarray", "steamship", "pdfminer-six", "lxml", "requests-toolbelt", "neo4j", "openlm", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "momento", "singlestoredb", "tigrisdb", "nebula3-python", "awadb"] -azure = ["azure-identity", "azure-cosmos", "openai", "azure-core", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech"] +azure = ["azure-identity", "azure-cosmos", "openai", "azure-core", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech", "azure-search-documents"] cohere = ["cohere"] docarray = ["docarray"] embeddings = ["sentence-transformers"] @@ -11464,4 +11486,4 @@ text-helpers = ["chardet"] [metadata] lock-version = "2.0" python-versions = ">=3.8.1,<4.0" -content-hash = "7a39130af070d4a4fe6b0af5d6b70615c868ab0b1867e404060ff00eacd10f5f" +content-hash = "17e9c7a2ae2d0ef7cf45bc232ebeb7fd3eee2760bb2a19b34a63dcddafd3e4ad" diff --git a/pyproject.toml b/pyproject.toml index fc6edb5b8a962..0278d4ab84ac5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -107,7 +107,7 @@ tigrisdb = {version = "^1.0.0b6", optional = true} nebula3-python = {version = "^3.4.0", optional = true} langchainplus-sdk = ">=0.0.7" awadb = {version = "^0.3.2", optional = true} - +azure-search-documents = {version = "11.4.0a20230509004", source = "azure-sdk-dev", optional = true} [tool.poetry.group.docs.dependencies] autodoc_pydantic = "^1.8.0" @@ -218,7 +218,16 @@ text_helpers = ["chardet"] cohere = ["cohere"] docarray = ["docarray"] embeddings = ["sentence-transformers"] -azure = ["azure-identity", "azure-cosmos", "openai", "azure-core", "azure-ai-formrecognizer", "azure-ai-vision", "azure-cognitiveservices-speech"] +azure = [ + "azure-identity", + "azure-cosmos", + "openai", + "azure-core", + "azure-ai-formrecognizer", + "azure-ai-vision", + "azure-cognitiveservices-speech", + "azure-search-documents", +] all = [ "anthropic", "cohere", @@ -320,6 +329,11 @@ extended_testing = [ "openai" ] +[[tool.poetry.source]] +name = 
"azure-sdk-dev" +url = "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/" +secondary = true + [tool.ruff] select = [ "E", # pycodestyle diff --git a/tests/integration_tests/vectorstores/test_azuresearch.py b/tests/integration_tests/vectorstores/test_azuresearch.py new file mode 100644 index 0000000000000..b5c3b720cf2f5 --- /dev/null +++ b/tests/integration_tests/vectorstores/test_azuresearch.py @@ -0,0 +1,93 @@ +import os +import time + +import openai +import pytest +from dotenv import load_dotenv + +from langchain.embeddings.openai import OpenAIEmbeddings +from langchain.vectorstores.azuresearch import AzureSearch + +load_dotenv() + +# Azure OpenAI settings +openai.api_type = "azure" +openai.api_base = os.getenv("OPENAI_API_BASE", "") +openai.api_version = "2023-05-15" +openai.api_key = os.getenv("OPENAI_API_KEY", "") +model: str = os.getenv("OPENAI_EMBEDDINGS_ENGINE_DOC", "text-embedding-ada-002") + +# Vector store settings +vector_store_address: str = os.getenv("AZURE_SEARCH_ENDPOINT", "") +vector_store_password: str = os.getenv("AZURE_SEARCH_ADMIN_KEY", "") +index_name: str = "embeddings-vector-store-test" + + +@pytest.fixture +def similarity_search_test() -> None: + """Test end to end construction and search.""" + # Create Embeddings + embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=model, chunk_size=1) + # Create Vector store + vector_store: AzureSearch = AzureSearch( + azure_search_endpoint=vector_store_address, + azure_search_key=vector_store_password, + index_name=index_name, + embedding_function=embeddings.embed_query, + ) + # Add texts to vector store and perform a similarity search + vector_store.add_texts( + ["Test 1", "Test 2", "Test 3"], + [ + {"title": "Title 1", "any_metadata": "Metadata 1"}, + {"title": "Title 2", "any_metadata": "Metadata 2"}, + {"title": "Title 3", "any_metadata": "Metadata 3"}, + ], + ) + time.sleep(1) + res = vector_store.similarity_search(query="Test 1", k=3) + assert len(res) == 3 + + +def from_text_similarity_search_test() -> None: + """Test end to end construction and search.""" + # Create Embeddings + embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=model, chunk_size=1) + # Create Vector store + vector_store: AzureSearch = AzureSearch.from_texts( + azure_search_endpoint=vector_store_address, + azure_search_key=vector_store_password, + index_name=index_name, + texts=["Test 1", "Test 2", "Test 3"], + embedding=embeddings, + ) + time.sleep(1) + # Perform a similarity search + res = vector_store.similarity_search(query="Test 1", k=3) + assert len(res) == 3 + + +def test_semantic_hybrid_search() -> None: + """Test end to end construction and search.""" + # Create Embeddings + embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=model, chunk_size=1) + # Create Vector store + vector_store: AzureSearch = AzureSearch( + azure_search_endpoint=vector_store_address, + azure_search_key=vector_store_password, + index_name=index_name, + embedding_function=embeddings.embed_query, + semantic_configuration_name="default", + ) + # Add texts to vector store and perform a semantic hybrid search + vector_store.add_texts( + ["Test 1", "Test 2", "Test 3"], + [ + {"title": "Title 1", "any_metadata": "Metadata 1"}, + {"title": "Title 2", "any_metadata": "Metadata 2"}, + {"title": "Title 3", "any_metadata": "Metadata 3"}, + ], + ) + time.sleep(1) + res = vector_store.semantic_hybrid_search(query="What's Azure Search?", k=3) + assert len(res) == 3 From 289e9aeb9d122d689d68b2e77236ce3dfcd606a7 Mon Sep 17 00:00:00 2001 From: 
Harrison Chase
Date: Sun, 11 Jun 2023 21:32:45 -0700
Subject: [PATCH 39/46] bump ver to 198 (#6026)

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 0278d4ab84ac5..95d40530806a7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langchain"
-version = "0.0.197"
+version = "0.0.198"
 description = "Building applications with LLMs through composability"
 authors = []
 license = "MIT"

From 7a5e36f3f5b49ea681124ce9a47beca2ecb17d17 Mon Sep 17 00:00:00 2001
From: Ben Flast <55411495+bafMdb@users.noreply.github.com>
Date: Mon, 12 Jun 2023 10:29:27 -0400
Subject: [PATCH 40/46] Mongo db doc fix (#6042)

I missed a few errors in my initial fix @hwchase1. Thanks!

---
 .../examples/mongodb_atlas_vector_search.ipynb   | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/docs/modules/indexes/vectorstores/examples/mongodb_atlas_vector_search.ipynb b/docs/modules/indexes/vectorstores/examples/mongodb_atlas_vector_search.ipynb
index 54bf583212678..b41ec8c8a697e 100644
--- a/docs/modules/indexes/vectorstores/examples/mongodb_atlas_vector_search.ipynb
+++ b/docs/modules/indexes/vectorstores/examples/mongodb_atlas_vector_search.ipynb
@@ -9,13 +9,13 @@
    "#### Commented out until further notice\n",
    "MongoDB Atlas Vector Search\n",
    "\n",
-    ">[MongoDB Atlas](https://www.mongodb.com/docs/atlas/) is a fully-managed cloud database offered in the cloud service provider of your choice (AWS , Azure, and GCP). It now has support for native Vector Search ontop of your MongoDB document data.\n",
+    ">[MongoDB Atlas](https://www.mongodb.com/docs/atlas/) is a fully-managed cloud database available in AWS, Azure, and GCP. It now has support for native Vector Search on your MongoDB document data.\n",
     "\n",
-    "This notebook shows how to use `MongoDB Atlas Vector Search` to store your embeddings in MongoDB documents, create a vector search index, and perform KNN search with and approximate nearest neighbor algorithm.\n",
+    "This notebook shows how to use `MongoDB Atlas Vector Search` to store your embeddings in MongoDB documents, create a vector search index, and perform KNN search with an approximate nearest neighbor algorithm.\n",
     "\n",
-    "It uses the [knnBeta Operator](https://www.mongodb.com/docs/atlas/atlas-search/knn-beta) available in MongoDB Atlas Search. This feature is in Public Preview and available only for evaluation purposes, to validate functionality, and to gather feedback from public preview users. It is not recommended for production deployments as we may introduce breaking changes.\n",
+    "It uses the [knnBeta Operator](https://www.mongodb.com/docs/atlas/atlas-search/knn-beta) available in MongoDB Atlas Search. This feature is in Public Preview and available for evaluation purposes, to validate functionality, and to gather feedback from public preview users. It is not recommended for production deployments as we may introduce breaking changes.\n",
     "\n",
-    "To use MongoDB Atlas, you must have first deployed a cluster. We have a Forever Free tier of clusters available. \n",
+    "To use MongoDB Atlas, you must first deploy a cluster. We have a Forever-Free tier of clusters available. \n",
     "To get started head over to Atlas here: [quick start](https://www.mongodb.com/docs/atlas/getting-started/)."
    ]
   },
@@ -51,7 +51,7 @@
    "id": "457ace44-1d95-4001-9dd5-78811ab208ad",
    "metadata": {},
    "source": [
-    "We want to use `OpenAIEmbeddings` so we have setup the OpenAI API Key. 
" + "We want to use `OpenAIEmbeddings` so we need to set up our OpenAI API Key. " ] }, { @@ -71,8 +71,8 @@ "id": "1f3ecc42", "metadata": {}, "source": [ - "Now, let's create a vector index on your cluster. In the below example, `embedding` is the name of the field that contains the embedding vector. Please refer to the [documentation](https://www.mongodb.com/docs/atlas/atlas-search/define-field-mappings-for-vector-search) to get more details on how to define an Atlas Vector Search index.\n", - "You can name the index `langchain_demo` and create the index on the namespace `lanchain_db.langchain_col`. Finally, write the following definition in the JSON editor:\n", + "Now, let's create a vector search index on your cluster. In the below example, `embedding` is the name of the field that contains the embedding vector. Please refer to the [documentation](https://www.mongodb.com/docs/atlas/atlas-search/define-field-mappings-for-vector-search) to get more details on how to define an Atlas Vector Search index.\n", + "You can name the index `langchain_demo` and create the index on the namespace `lanchain_db.langchain_col`. Finally, write the following definition in the JSON editor on MongoDB Atlas:\n", "\n", "```json\n", "{\n", @@ -167,7 +167,7 @@ "id": "851a2ec9-9390-49a4-8412-3e132c9f789d", "metadata": {}, "source": [ - "You can reuse the vector index you created before, make sure environment variable `OPENAI_API_KEY` is set up, then create another file." + "You can reuse the vector search index you created, make sure the `OPENAI_API_KEY` environment variable is set up, then execute another query." ] }, { From 681ba6d520585503ab5c0b8af0a95c35be7e26b6 Mon Sep 17 00:00:00 2001 From: Harrison Chase Date: Mon, 12 Jun 2023 08:00:14 -0700 Subject: [PATCH 41/46] embaas title --- .../text_embedding/examples/embaas.ipynb | 93 ++++++++----------- 1 file changed, 39 insertions(+), 54 deletions(-) diff --git a/docs/modules/models/text_embedding/examples/embaas.ipynb b/docs/modules/models/text_embedding/examples/embaas.ipynb index cb5132e8c6751..5a1350e7638bf 100644 --- a/docs/modules/models/text_embedding/examples/embaas.ipynb +++ b/docs/modules/models/text_embedding/examples/embaas.ipynb @@ -2,142 +2,127 @@ "cells": [ { "cell_type": "markdown", + "metadata": {}, "source": [ + "# Embaas\n", + "\n", "[embaas](https://embaas.io) is a fully managed NLP API service that offers features like embedding generation, document text extraction, document to embeddings and more. You can choose a [variety of pre-trained models](https://embaas.io/docs/models/embeddings).\n", "\n", "In this tutorial, we will show you how to use the embaas Embeddings API to generate embeddings for a given text.\n", "\n", "### Prerequisites\n", "Create your free embaas account at [https://embaas.io/register](https://embaas.io/register) and generate an [API key](https://embaas.io/dashboard/api-keys)." 
- ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, "outputs": [], "source": [ "# Set API key\n", "embaas_api_key = \"YOUR_API_KEY\"\n", "# or set environment variable\n", "os.environ[\"EMBAAS_API_KEY\"] = \"YOUR_API_KEY\"" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, "outputs": [], "source": [ "from langchain.embeddings import EmbaasEmbeddings" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, "outputs": [], "source": [ "embeddings = EmbaasEmbeddings()" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-10T11:17:55.940265Z", + "start_time": "2023-06-10T11:17:55.938517Z" + } + }, "outputs": [], "source": [ "# Create embeddings for a single document\n", "doc_text = \"This is a test document.\"\n", "doc_text_embedding = embeddings.embed_query(doc_text)" - ], - "metadata": { - "collapsed": false, - "ExecuteTime": { - "start_time": "2023-06-10T11:17:55.938517Z", - "end_time": "2023-06-10T11:17:55.940265Z" - } - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, "outputs": [], "source": [ "# Print created embedding\n", "print(doc_text_embedding)" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": 9, + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-10T11:19:25.237161Z", + "start_time": "2023-06-10T11:19:25.235320Z" + } + }, "outputs": [], "source": [ "# Create embeddings for multiple documents\n", "doc_texts = [\"This is a test document.\", \"This is another test document.\"]\n", "doc_texts_embeddings = embeddings.embed_documents(doc_texts)" - ], - "metadata": { - "collapsed": false, - "ExecuteTime": { - "start_time": "2023-06-10T11:19:25.235320Z", - "end_time": "2023-06-10T11:19:25.237161Z" - } - } + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, "outputs": [], "source": [ "# Print created embeddings\n", "for i, doc_text_embedding in enumerate(doc_texts_embeddings):\n", " print(f\"Embedding for document {i + 1}: {doc_text_embedding}\")" - ], - "metadata": { - "collapsed": false - } + ] }, { "cell_type": "code", "execution_count": 11, + "metadata": { + "ExecuteTime": { + "end_time": "2023-06-10T11:22:26.139769Z", + "start_time": "2023-06-10T11:22:26.138357Z" + } + }, "outputs": [], "source": [ "# Using a different model and/or custom instruction\n", "embeddings = EmbaasEmbeddings(model=\"instructor-large\", instruction=\"Represent the Wikipedia document for retrieval\")" - ], - "metadata": { - "collapsed": false, - "ExecuteTime": { - "start_time": "2023-06-10T11:22:26.138357Z", - "end_time": "2023-06-10T11:22:26.139769Z" - } - } + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "For more detailed information about the embaas Embeddings API, please refer to [the official embaas API documentation](https://embaas.io/api-reference)." 
-   ],
-   "metadata": {
-    "collapsed": false
-   }
+   ]
   }
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
+   "display_name": "Python 3 (ipykernel)",
    "language": "python",
    "name": "python3"
   },
@@ -155,5 +140,5 @@
   }
  },
  "nbformat": 4,
- "nbformat_minor": 0
+ "nbformat_minor": 1
 }

From 5922742d565aadfeef58648067b0874c5c57366d Mon Sep 17 00:00:00 2001
From: Harrison Chase
Date: Mon, 12 Jun 2023 10:57:31 -0700
Subject: [PATCH 42/46] comment out

---
 .../examples/mongodb_atlas_vector_search.ipynb            | 9 +++------
 docs/modules/models/text_embedding/examples/embaas.ipynb  | 6 +++---
 2 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/docs/modules/indexes/vectorstores/examples/mongodb_atlas_vector_search.ipynb b/docs/modules/indexes/vectorstores/examples/mongodb_atlas_vector_search.ipynb
index b41ec8c8a697e..4ebd0a723da77 100644
--- a/docs/modules/indexes/vectorstores/examples/mongodb_atlas_vector_search.ipynb
+++ b/docs/modules/indexes/vectorstores/examples/mongodb_atlas_vector_search.ipynb
@@ -1,12 +1,12 @@
 {
  "cells": [
   {
-   "attachments": {},
    "cell_type": "markdown",
    "id": "683953b3",
    "metadata": {},
    "source": [
-    "#### Commented out until further notice\n",
+    "Commented out until further notice\n",
+    "\n",
     "MongoDB Atlas Vector Search\n",
     "\n",
     ">[MongoDB Atlas](https://www.mongodb.com/docs/atlas/) is a fully-managed cloud database available in AWS, Azure, and GCP. It now has support for native Vector Search on your MongoDB document data.\n",
@@ -46,7 +46,6 @@
    ]
   },
   {
-   "attachments": {},
    "cell_type": "markdown",
    "id": "457ace44-1d95-4001-9dd5-78811ab208ad",
    "metadata": {},
@@ -66,7 +65,6 @@
    ]
   },
   {
-   "attachments": {},
    "cell_type": "markdown",
    "id": "1f3ecc42",
    "metadata": {},
@@ -162,7 +160,6 @@
    ]
   },
   {
-   "attachments": {},
    "cell_type": "markdown",
    "id": "851a2ec9-9390-49a4-8412-3e132c9f789d",
    "metadata": {},
@@ -220,7 +217,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.11.3"
+   "version": "3.9.1"
   }
  },
  "nbformat": 4,
diff --git a/docs/modules/models/text_embedding/examples/embaas.ipynb b/docs/modules/models/text_embedding/examples/embaas.ipynb
index 5a1350e7638bf..2473fe9045003 100644
--- a/docs/modules/models/text_embedding/examples/embaas.ipynb
+++ b/docs/modules/models/text_embedding/examples/embaas.ipynb
@@ -129,14 +129,14 @@
   "language_info": {
    "codemirror_mode": {
     "name": "ipython",
-    "version": 2
+    "version": 3
    },
    "file_extension": ".py",
    "mimetype": "text/x-python",
    "name": "python",
    "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython2",
-   "version": "2.7.6"
+   "pygments_lexer": "ipython3",
+   "version": "3.9.1"
   }
  },
  "nbformat": 4,

From 2c91f0d750eb153f72f7d095fadde18f8c683de8 Mon Sep 17 00:00:00 2001
From: Jens Madsen
Date: Mon, 12 Jun 2023 22:27:10 +0200
Subject: [PATCH 43/46] chore: speed up integration test by using smaller model
 (#6044)

Adds a new parameter `relative_chunk_overlap` for the
`SentenceTransformersTokenTextSplitter` constructor. The parameter sets the
chunk overlap using a relative factor, e.g.
for a model where the token limit is 100, a `relative_chunk_overlap=0.5` implies that `chunk_overlap=50` Tag maintainers/contributors who might be interested: @hwchase17, @dev2049 --- tests/integration_tests/test_text_splitter.py | 31 ++++++++++++++----- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/tests/integration_tests/test_text_splitter.py b/tests/integration_tests/test_text_splitter.py index 3cf78c71a0b71..e27108f98874b 100644 --- a/tests/integration_tests/test_text_splitter.py +++ b/tests/integration_tests/test_text_splitter.py @@ -52,14 +52,14 @@ def test_token_text_splitter_from_tiktoken() -> None: def test_sentence_transformers_count_tokens() -> None: splitter = SentenceTransformersTokenTextSplitter( - model_name="sentence-transformers/paraphrase-multilingual-mpnet-base-v2" + model_name="sentence-transformers/paraphrase-albert-small-v2" ) text = "Lorem ipsum" token_count = splitter.count_tokens(text=text) expected_start_stop_token_count = 2 - expected_text_token_count = 2 + expected_text_token_count = 5 expected_token_count = expected_start_stop_token_count + expected_text_token_count assert expected_token_count == token_count @@ -67,9 +67,9 @@ def test_sentence_transformers_count_tokens() -> None: def test_sentence_transformers_split_text() -> None: splitter = SentenceTransformersTokenTextSplitter( - model_name="sentence-transformers/paraphrase-multilingual-mpnet-base-v2" + model_name="sentence-transformers/paraphrase-albert-small-v2" ) - text = "Lorem ipsum" + text = "lorem ipsum" text_chunks = splitter.split_text(text=text) expected_text_chunks = [text] assert expected_text_chunks == text_chunks @@ -79,14 +79,29 @@ def test_sentence_transformers_multiple_tokens() -> None: splitter = SentenceTransformersTokenTextSplitter(chunk_overlap=0) text = "Lorem " + text_token_count_including_start_and_stop_tokens = splitter.count_tokens(text=text) count_start_and_end_tokens = 2 - text_token_count = splitter.count_tokens(text=text) - count_start_and_end_tokens - token_multiplier = splitter.maximum_tokens_per_chunk // text_token_count + 1 - text_chunks = splitter.split_text(text=text * token_multiplier) + token_multiplier = ( + count_start_and_end_tokens + + (splitter.maximum_tokens_per_chunk - count_start_and_end_tokens) + // ( + text_token_count_including_start_and_stop_tokens + - count_start_and_end_tokens + ) + + 1 + ) + + # `text_to_split` does not fit in a single chunk + text_to_embed = text * token_multiplier + + text_chunks = splitter.split_text(text=text_to_embed) expected_number_of_chunks = 2 assert expected_number_of_chunks == len(text_chunks) actual = splitter.count_tokens(text=text_chunks[1]) - count_start_and_end_tokens - expected = token_multiplier * text_token_count - splitter.maximum_tokens_per_chunk + expected = ( + token_multiplier * (text_token_count_including_start_and_stop_tokens - 2) + - splitter.maximum_tokens_per_chunk + ) assert expected == actual From b023f0c0f244f890c8d9102776b9586eb603cb2a Mon Sep 17 00:00:00 2001 From: Lance Martin <122662504+rlancemartin@users.noreply.github.com> Date: Mon, 12 Jun 2023 15:46:42 -0700 Subject: [PATCH 44/46] Text splitter for Markdown files by header (#5860) This creates a new kind of text splitter for markdown files. The user can supply a set of headers that they want to split the file on. 
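As a quick illustration of the intended usage (a minimal sketch; the sample
markdown string and expected output mirror the test notebook added below):

```python
from langchain.text_splitter import MarkdownHeaderTextSplitter

md = "# Foo\n\n## Bar\n\nHi this is Jim\n\nHi this is Joe\n\n## Baz\n\nHi this is Molly"

# Track H1/H2 headers; lines under the same headers are grouped into one split.
markdown_splitter = MarkdownHeaderTextSplitter(
    headers_to_split_on=[("#", "Header 1"), ("##", "Header 2")]
)
for chunk in markdown_splitter.split_text(md):
    print(chunk)
# {'content': 'Hi this is Jim \nHi this is Joe', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Bar'}}
# {'content': 'Hi this is Molly', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Baz'}}
```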
We define a new text splitter class, `MarkdownHeaderTextSplitter`, that does a
few things:

(1) For each line, it determines the associated set of user-specified headers.
(2) It groups lines with common headers into splits.

See the notebook for example usage and test cases.
---
 .../examples/markdown_header_metadata.ipynb   | 324 ++++++++++++++++++
 langchain/text_splitter.py                    | 150 ++++++++
 2 files changed, 474 insertions(+)
 create mode 100644 docs/modules/indexes/text_splitters/examples/markdown_header_metadata.ipynb

diff --git a/docs/modules/indexes/text_splitters/examples/markdown_header_metadata.ipynb b/docs/modules/indexes/text_splitters/examples/markdown_header_metadata.ipynb
new file mode 100644
index 0000000000000..db300d63075af
--- /dev/null
+++ b/docs/modules/indexes/text_splitters/examples/markdown_header_metadata.ipynb
@@ -0,0 +1,324 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "70e9b619",
+   "metadata": {},
+   "source": [
+    "# MarkdownHeaderTextSplitter\n",
+    "\n",
+    "The objective is to split a markdown file by a specified set of headers.\n",
+    " \n",
+    "**Given this example:**\n",
+    "\n",
+    "# Foo\n",
+    "\n",
+    "## Bar\n",
+    "\n",
+    "Hi this is Jim \n",
+    "Hi this is Joe\n",
+    "\n",
+    "## Baz\n",
+    "\n",
+    "Hi this is Molly\n",
+    " \n",
+    "**Written as:**\n",
+    "\n",
+    "```\n",
+    "md = '# Foo\\n\\n ## Bar\\n\\nHi this is Jim \\nHi this is Joe\\n\\n ## Baz\\n\\n Hi this is Molly' \n",
+    "```\n",
+    "\n",
+    "**If we want to split on specified headers:**\n",
+    "```\n",
+    "[(\"#\", \"Header 1\"),(\"##\", \"Header 2\")]\n",
+    "```\n",
+    "\n",
+    "**Then we expect:** \n",
+    "```\n",
+    "{'content': 'Hi this is Jim \\nHi this is Joe', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Bar'}}\n",
+    "{'content': 'Hi this is Molly', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Baz'}}\n",
+    "```\n",
+    "\n",
+    "**Options:**\n",
+    " \n",
+    "This also includes `return_each_line` in case a user wants to perform other types of aggregation. \n",
+    "\n",
+    "If `return_each_line=True`, each line and associated header metadata are returned. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "19c044f0", + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.text_splitter import MarkdownHeaderTextSplitter" + ] + }, + { + "cell_type": "markdown", + "id": "ec8d8053", + "metadata": {}, + "source": [ + "`Test case 1`" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "5cd0a66c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'content': 'Hi this is Jim', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Bar'}}\n", + "{'content': 'Hi this is Joe', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Bar'}}\n", + "{'content': 'Hi this is Molly', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Baz'}}\n" + ] + } + ], + "source": [ + "# Doc\n", + "markdown_document = '# Foo\\n\\n ## Bar\\n\\nHi this is Jim\\n\\nHi this is Joe\\n\\n ## Baz\\n\\n Hi this is Molly' \n", + " \n", + "# Test case 1\n", + "headers_to_split_on = [\n", + " (\"#\", \"Header 1\"),\n", + " (\"##\", \"Header 2\"),\n", + "]\n", + "\n", + "markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on,return_each_line=True)\n", + "\n", + "chunked_docs = markdown_splitter.split_text(markdown_document)\n", + "for chunk in chunked_docs:\n", + " print(chunk)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "67d25a1c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'content': 'Hi this is Jim \\nHi this is Joe', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Bar'}}\n", + "{'content': 'Hi this is Molly', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Baz'}}\n" + ] + } + ], + "source": [ + "markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on,return_each_line=False)\n", + "chunked_docs = markdown_splitter.split_text(markdown_document)\n", + "for chunk in chunked_docs:\n", + " print(chunk)" + ] + }, + { + "cell_type": "markdown", + "id": "f1f74dfa", + "metadata": {}, + "source": [ + "`Test case 2`" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "2183c96a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'content': 'Text under H3.', 'metadata': {'Header 1': 'H1', 'Header 2': 'H2', 'Header 3': 'H3'}}\n", + "{'content': 'Text under H2_2.', 'metadata': {'Header 1': 'H1_2', 'Header 2': 'H2_2'}}\n" + ] + } + ], + "source": [ + "headers_to_split_on = [\n", + " (\"#\", \"Header 1\"),\n", + " (\"##\", \"Header 2\"),\n", + " (\"###\", \"Header 3\"),\n", + "]\n", + "markdown_document = '# H1\\n\\n## H2\\n\\n### H3\\n\\nText under H3.\\n\\n# H1_2\\n\\n## H2_2\\n\\nText under H2_2.'\n", + "markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on,return_each_line=False)\n", + "chunked_docs = markdown_splitter.split_text(markdown_document)\n", + "for chunk in chunked_docs:\n", + " print(chunk)" + ] + }, + { + "cell_type": "markdown", + "id": "add24254", + "metadata": {}, + "source": [ + "`Test case 3`" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "c3f4690f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'content': 'Hi this is Jim \\nHi this is Joe', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Bar'}}\n", + "{'content': 'Hi this is Lance', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo'}}\n", + "{'content': 'Hi this is Molly', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Baz'}}\n" + ] + 
} + ], + "source": [ + "markdown_document = '# Foo\\n\\n ## Bar\\n\\nHi this is Jim\\n\\nHi this is Joe\\n\\n ### Boo \\n\\n Hi this is Lance \\n\\n ## Baz\\n\\n Hi this is Molly' \n", + " \n", + "headers_to_split_on = [\n", + " (\"#\", \"Header 1\"),\n", + " (\"##\", \"Header 2\"),\n", + " (\"###\", \"Header 3\"),\n", + "]\n", + "markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on,return_each_line=False)\n", + "chunked_docs = markdown_splitter.split_text(markdown_document)\n", + "for chunk in chunked_docs:\n", + " print(chunk)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "20907fb7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'content': 'Hi this is Jim', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Bar'}}\n", + "{'content': 'Hi this is Joe', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Bar'}}\n", + "{'content': 'Hi this is Lance', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo'}}\n", + "{'content': 'Hi this is Molly', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Baz'}}\n" + ] + } + ], + "source": [ + "markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on,return_each_line=True)\n", + "chunked_docs = markdown_splitter.split_text(markdown_document)\n", + "for chunk in chunked_docs:\n", + " print(chunk)" + ] + }, + { + "cell_type": "markdown", + "id": "9c448431", + "metadata": {}, + "source": [ + "`Test case 4`" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "9858ea51", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'content': 'Hi this is Jim \\nHi this is Joe', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Bar'}}\n", + "{'content': 'Hi this is Lance', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo'}}\n", + "{'content': 'Hi this is John', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Bar', 'Header 3': 'Boo', 'Header 4': 'Bim'}}\n", + "{'content': 'Hi this is Molly', 'metadata': {'Header 1': 'Foo', 'Header 2': 'Baz'}}\n" + ] + } + ], + "source": [ + "markdown_document = '# Foo\\n\\n ## Bar\\n\\nHi this is Jim\\n\\nHi this is Joe\\n\\n ### Boo \\n\\n Hi this is Lance \\n\\n #### Bim \\n\\n Hi this is John \\n\\n ## Baz\\n\\n Hi this is Molly'\n", + " \n", + "headers_to_split_on = [\n", + " (\"#\", \"Header 1\"),\n", + " (\"##\", \"Header 2\"),\n", + " (\"###\", \"Header 3\"),\n", + " (\"####\", \"Header 4\"),\n", + "]\n", + " \n", + "markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on,return_each_line=False)\n", + "chunked_docs = markdown_splitter.split_text(markdown_document)\n", + "for chunk in chunked_docs:\n", + " print(chunk)" + ] + }, + { + "cell_type": "markdown", + "id": "bba6eb9e", + "metadata": {}, + "source": [ + "`Test case 5`" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "8af8f9a2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{'content': 'Markdown[9] is a lightweight markup language for creating formatted text using a plain-text editor. 
John Gruber created Markdown in 2004 as a markup language that is appealing to human readers in its source code form.[9] \\nMarkdown is widely used in blogging, instant messaging, online forums, collaborative software, documentation pages, and readme files.', 'metadata': {'Header 1': 'Intro', 'Header 2': 'History'}}\n", + "{'content': 'As Markdown popularity grew rapidly, many Markdown implementations appeared, driven mostly by the need for \\nadditional features such as tables, footnotes, definition lists,[note 1] and Markdown inside HTML blocks.', 'metadata': {'Header 1': 'Intro', 'Header 2': 'Rise and divergence'}}\n", + "{'content': 'From 2012, a group of people, including Jeff Atwood and John MacFarlane, launched what Atwood characterised as a standardisation effort.', 'metadata': {'Header 1': 'Intro', 'Header 2': 'Rise and divergence', 'Header 4': 'Standardization'}}\n", + "{'content': 'Implementations of Markdown are available for over a dozen programming languages.', 'metadata': {'Header 1': 'Intro', 'Header 2': 'Implementations'}}\n" + ] + } + ], + "source": [ + "markdown_document = '# Intro \\n\\n ## History \\n\\n Markdown[9] is a lightweight markup language for creating formatted text using a plain-text editor. John Gruber created Markdown in 2004 as a markup language that is appealing to human readers in its source code form.[9] \\n\\n Markdown is widely used in blogging, instant messaging, online forums, collaborative software, documentation pages, and readme files. \\n\\n ## Rise and divergence \\n\\n As Markdown popularity grew rapidly, many Markdown implementations appeared, driven mostly by the need for \\n\\n additional features such as tables, footnotes, definition lists,[note 1] and Markdown inside HTML blocks. \\n\\n #### Standardization \\n\\n From 2012, a group of people, including Jeff Atwood and John MacFarlane, launched what Atwood characterised as a standardisation effort. 
\\n\\n ## Implementations \\n\\n Implementations of Markdown are available for over a dozen programming languages.'\n",
+    "  \n",
+    "headers_to_split_on = [\n",
+    "    (\"#\", \"Header 1\"),\n",
+    "    (\"##\", \"Header 2\"),\n",
+    "    (\"###\", \"Header 3\"),\n",
+    "    (\"####\", \"Header 4\"),\n",
+    "]\n",
+    "  \n",
+    "markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on,return_each_line=False)\n",
+    "chunked_docs = markdown_splitter.split_text(markdown_document)\n",
+    "for chunk in chunked_docs:\n",
+    "    print(chunk)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.16"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/langchain/text_splitter.py b/langchain/text_splitter.py
index 89559505583c7..15723b66d0e39 100644
--- a/langchain/text_splitter.py
+++ b/langchain/text_splitter.py
@@ -12,12 +12,15 @@
     Any,
     Callable,
     Collection,
+    Dict,
     Iterable,
     List,
     Literal,
     Optional,
     Sequence,
+    Tuple,
     Type,
+    TypedDict,
     TypeVar,
     Union,
     cast,
@@ -254,6 +257,153 @@ def split_text(self, text: str) -> List[str]:
         return self._merge_splits(splits, _separator)
 
 
+class LineType(TypedDict):
+    metadata: Dict[str, str]
+    content: str
+
+
+class HeaderType(TypedDict):
+    level: int
+    name: str
+    data: str
+
+
+class MarkdownHeaderTextSplitter:
+    """Implementation of splitting markdown files based on specified headers."""
+
+    def __init__(
+        self, headers_to_split_on: List[Tuple[str, str]], return_each_line: bool = False
+    ):
+        """Create a new MarkdownHeaderTextSplitter.
+
+        Args:
+            headers_to_split_on: Headers we want to track
+            return_each_line: Return each line w/ associated headers
+        """
+        # Output line-by-line or aggregated into chunks w/ common headers
+        self.return_each_line = return_each_line
+        # Given the headers we want to split on
+        # (e.g., "#", "##", etc.), order by length
+        self.headers_to_split_on = sorted(
+            headers_to_split_on, key=lambda split: len(split[0]), reverse=True
+        )
+
+    def aggregate_lines_to_chunks(self, lines: List[LineType]) -> List[LineType]:
+        """Combine lines with common metadata into chunks.
+
+        Args:
+            lines: Line of text / associated header metadata
+        """
+        aggregated_chunks: List[LineType] = []
+
+        for line in lines:
+            if (
+                aggregated_chunks
+                and aggregated_chunks[-1]["metadata"] == line["metadata"]
+            ):
+                # If the last line in the aggregated list
+                # has the same metadata as the current line,
+                # append the current content to the last line's content
+                aggregated_chunks[-1]["content"] += " \n" + line["content"]
+            else:
+                # Otherwise, append the current line to the aggregated list
+                aggregated_chunks.append(line)
+        return aggregated_chunks
+
+    def split_text(self, text: str) -> List[LineType]:
+        """Split markdown file.
+
+        Args:
+            text: Markdown file
+        """
+
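+        # High-level sketch of the algorithm below: walk the document line
+        # by line while maintaining a stack of the currently "open" headers,
+        # and tag every content line with the metadata of each header that
+        # sits on the stack at that point.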
+        # Split the input text by newline character ("\n").
+        lines = text.split("\n")
+        # Final output
+        lines_with_metadata: List[LineType] = []
+        # Content and metadata of the chunk currently being processed
+        current_content: List[str] = []
+        current_metadata: Dict[str, str] = {}
+        # Keep track of the nested header structure
+        # header_stack: List[Dict[str, Union[int, str]]] = []
+        header_stack: List[HeaderType] = []
+        initial_metadata: Dict[str, str] = {}
+
+        for line in lines:
+            stripped_line = line.strip()
+            # Check each line against each of the header types (e.g., #, ##)
+            for sep, name in self.headers_to_split_on:
+                # Check if line starts with a header that we intend to split on
+                if stripped_line.startswith(sep) and (
+                    # Header with no text OR header is followed by space
+                    # Both are valid conditions that sep is being used as a header
+                    len(stripped_line) == len(sep)
+                    or stripped_line[len(sep)] == " "
+                ):
+                    # Ensure we are tracking the header as metadata
+                    if name is not None:
+                        # Get the current header level
+                        current_header_level = sep.count("#")
+
+                        # Pop headers at the same or a deeper level off the stack
+                        while (
+                            header_stack
+                            and header_stack[-1]["level"] >= current_header_level
+                        ):
+                            # We have encountered a new header
+                            # at the same or higher level
+                            popped_header = header_stack.pop()
+                            # Clear the metadata for the
+                            # popped header in initial_metadata
+                            if popped_header["name"] in initial_metadata:
+                                initial_metadata.pop(popped_header["name"])
+
+                        # Push the current header to the stack
+                        header: HeaderType = {
+                            "level": current_header_level,
+                            "name": name,
+                            "data": stripped_line[len(sep) :].strip(),
+                        }
+                        header_stack.append(header)
+                        # Update initial_metadata with the current header
+                        initial_metadata[name] = header["data"]
+
+                    # Flush the content gathered under the previous header,
+                    # but only if current_content is not empty
+                    if current_content:
+                        lines_with_metadata.append(
+                            {
+                                "content": "\n".join(current_content),
+                                "metadata": current_metadata.copy(),
+                            }
+                        )
+                        current_content.clear()
+
+                    break
+            else:
+                if stripped_line:
+                    current_content.append(stripped_line)
+                elif current_content:
+                    lines_with_metadata.append(
+                        {
+                            "content": "\n".join(current_content),
+                            "metadata": current_metadata.copy(),
+                        }
+                    )
+                    current_content.clear()
+
+            current_metadata = initial_metadata.copy()
+
+        if current_content:
+            lines_with_metadata.append(
+                {"content": "\n".join(current_content), "metadata": current_metadata}
+            )
+
+        # lines_with_metadata has each line with associated header metadata;
+        # aggregate these into chunks based on common metadata
+        if not self.return_each_line:
+            return self.aggregate_lines_to_chunks(lines_with_metadata)
+        else:
+            return lines_with_metadata
+
+
 # should be in newer Python versions (3.10+)
 # @dataclass(frozen=True, kw_only=True, slots=True)
 @dataclass(frozen=True)

From 2f0088039d2b075c838632a5d4ec8cb045f8afa4 Mon Sep 17 00:00:00 2001
From: Zander Chase <130414180+vowelparrot@users.noreply.github.com>
Date: Mon, 12 Jun 2023 17:13:49 -0700
Subject: [PATCH 45/46] Log tracer errors (#6066)

Example (would log the error several times if not for the helper fn.;
previously, it would emit no logs at all due to multithreading):

![image](https://github.com/hwchase17/langchain/assets/130414180/070d25ae-1f06-4487-9617-0a6f66f3f01e)
---
 langchain/callbacks/tracers/langchain.py | 24 ++++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)

diff --git a/langchain/callbacks/tracers/langchain.py b/langchain/callbacks/tracers/langchain.py
index 9f734e2983b5e..1bdeb352b071e 100644
--- a/langchain/callbacks/tracers/langchain.py
+++ b/langchain/callbacks/tracers/langchain.py
@@ -16,6 +16,16 @@
 from langchain.schema import BaseMessage, messages_to_dict
 
 logger = logging.getLogger(__name__)
+_LOGGED = set()
+
+
+def log_error_once(method: str, exception: Exception) -> None:
+    """Log an error once."""
+    global _LOGGED
+    if (method, type(exception)) in _LOGGED:
+        return
+    _LOGGED.add((method, type(exception)))
+    logger.error(exception)
 
 
 class LangChainTracer(BaseTracer):
@@ -76,11 +86,21 @@ def _persist_run_single(self, run: Run) -> None:
         extra = run_dict.get("extra", {})
         extra["runtime"] = get_runtime_environment()
         run_dict["extra"] = extra
-        run = self.client.create_run(**run_dict, session_name=self.session_name)
+        try:
+            run = self.client.create_run(**run_dict, session_name=self.session_name)
+        except Exception as e:
+            # Errors are swallowed by the thread executor so we need to log them here
+            log_error_once("post", e)
+            raise
 
     def _update_run_single(self, run: Run) -> None:
         """Update a run."""
-        self.client.update_run(run.id, **run.dict())
+        try:
+            self.client.update_run(run.id, **run.dict())
+        except Exception as e:
+            # Errors are swallowed by the thread executor so we need to log them here
+            log_error_once("patch", e)
+            raise
 
     def _on_llm_start(self, run: Run) -> None:
         """Persist an LLM run."""

From 5b6bbf4ab2a33ed0d33ff5d3cb3979a7edc15682 Mon Sep 17 00:00:00 2001
From: Julius Lipp <43986145+juliuslipp@users.noreply.github.com>
Date: Tue, 13 Jun 2023 10:13:52 +0800
Subject: [PATCH 46/46] Add embaas document extraction api endpoints (#6048)

# Introduces embaas document extraction API endpoints

In this PR, we add support for the embaas document extraction endpoints,
complementing the existing Text Embedding Models integration (support for
LLMs is coming in separate PRs). We currently offer the top performers from
the MTEB leaderboard, will continue to add top embedding models, and will
soon add support for customers to deploy their own models. Additional
documentation + information can be found [here](https://embaas.io).

While developing this integration, I closely followed the patterns
established by other langchain integrations. Nonetheless, if there are any
aspects that require adjustments, or if there's a better way to present a
new integration, let me know! :) Additionally, I fixed some docs in the
embeddings integration.

Related PR: #5976

#### Who can review?

DataLoaders
- @eyurtsev
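As an appendix for reviewers, a minimal sketch of the raw HTTP call the new
loaders wrap. The request and response shapes are taken from the
implementation below; `example.pdf` and `YOUR_API_KEY` are placeholders:

```python
import base64

import requests

EMBAAS_DOC_API_URL = "https://api.embaas.io/v1/document/extract-text/bytes/"

# The document is sent base64-encoded under the "bytes" key
with open("example.pdf", "rb") as f:
    payload = {
        "bytes": base64.b64encode(f.read()).decode(),
        "mime_type": "application/pdf",
    }

response = requests.post(
    EMBAAS_DOC_API_URL,
    headers={
        "Authorization": "Bearer YOUR_API_KEY",
        "Content-Type": "application/json",
    },
    json=payload,
)
response.raise_for_status()

# Each chunk carries the extracted "text" and its "metadata"
# (plus an "embedding" when should_embed is set)
chunks = response.json()["data"]["chunks"]
```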
---
 .../document_loaders/examples/embaas.ipynb    | 167 +++++++++++++
 langchain/document_loaders/__init__.py        |   3 +
 langchain/document_loaders/embaas.py          | 234 ++++++++++++++++++
 langchain/embeddings/embaas.py                |   7 +-
 .../document_loaders/test_embaas.py           |  59 +++++
 5 files changed, 466 insertions(+), 4 deletions(-)
 create mode 100644 docs/modules/indexes/document_loaders/examples/embaas.ipynb
 create mode 100644 langchain/document_loaders/embaas.py
 create mode 100644 tests/integration_tests/document_loaders/test_embaas.py

diff --git a/docs/modules/indexes/document_loaders/examples/embaas.ipynb b/docs/modules/indexes/document_loaders/examples/embaas.ipynb
new file mode 100644
index 0000000000000..0c8c19d71acf8
--- /dev/null
+++ b/docs/modules/indexes/document_loaders/examples/embaas.ipynb
@@ -0,0 +1,167 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "source": [
+    "# Embaas\n",
+    "[embaas](https://embaas.io) is a fully managed NLP API service that offers features like embedding generation, document text extraction, document-to-embeddings conversion, and more. You can choose a [variety of pre-trained models](https://embaas.io/docs/models/embeddings).\n",
+    "\n",
+    "### Prerequisites\n",
+    "Create a free embaas account at [https://embaas.io/register](https://embaas.io/register) and generate an [API key](https://embaas.io/dashboard/api-keys).\n",
+    "\n",
+    "### Document Text Extraction API\n",
+    "The document text extraction API allows you to extract the text from a given document. The API supports a variety of document formats, including PDF, mp3, mp4 and more. For a full list of supported formats, check out the API docs (link below)."
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "\n",
+    "# Set API key\n",
+    "embaas_api_key = \"YOUR_API_KEY\"\n",
+    "# or set environment variable\n",
+    "os.environ[\"EMBAAS_API_KEY\"] = \"YOUR_API_KEY\""
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "#### Using a blob (bytes)"
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "from langchain.document_loaders.embaas import EmbaasBlobLoader\n",
+    "from langchain.document_loaders.blob_loaders import Blob"
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "blob_loader = EmbaasBlobLoader()\n",
+    "blob = Blob.from_path(\"example.pdf\")\n",
+    "documents = blob_loader.parse(blob)"
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "# You can also directly create embeddings with your preferred embeddings model\n",
+    "blob_loader = EmbaasBlobLoader(params={\"model\": \"e5-large-v2\", \"should_embed\": True})\n",
+    "blob = Blob.from_path(\"example.pdf\")\n",
+    "documents = blob_loader.parse(blob)\n",
+    "\n",
+    "print(documents[0].metadata[\"embedding\"])"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "ExecuteTime": {
+     "start_time": "2023-06-12T22:19:48.366886Z",
+     "end_time": "2023-06-12T22:19:48.380467Z"
+    }
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "#### Using a file"
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "from langchain.document_loaders.embaas import EmbaasLoader"
+   ],
+   "metadata": {
+    "collapsed": false
+ } + }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "file_loader = EmbaasLoader(file_path=\"example.pdf\")\n", + "documents = file_loader.load()" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": 15, + "outputs": [], + "source": [ + "# Disable automatic text splitting\n", + "file_loader = EmbaasLoader(file_path=\"example.mp3\", params={\"should_chunk\": False})\n", + "documents = file_loader.load()" + ], + "metadata": { + "collapsed": false, + "ExecuteTime": { + "start_time": "2023-06-12T22:24:31.880857Z", + "end_time": "2023-06-12T22:24:31.894665Z" + } + } + }, + { + "cell_type": "markdown", + "source": [ + "For more detailed information about the embaas document text extraction API, please refer to [the official embaas API documentation](https://embaas.io/api-reference)." + ], + "metadata": { + "collapsed": false + } + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/langchain/document_loaders/__init__.py b/langchain/document_loaders/__init__.py index 87d2335e2344c..17f764b1f8e6e 100644 --- a/langchain/document_loaders/__init__.py +++ b/langchain/document_loaders/__init__.py @@ -31,6 +31,7 @@ OutlookMessageLoader, UnstructuredEmailLoader, ) +from langchain.document_loaders.embaas import EmbaasBlobLoader, EmbaasLoader from langchain.document_loaders.epub import UnstructuredEPubLoader from langchain.document_loaders.evernote import EverNoteLoader from langchain.document_loaders.excel import UnstructuredExcelLoader @@ -250,4 +251,6 @@ "WikipediaLoader", "YoutubeLoader", "SnowflakeLoader", + "EmbaasLoader", + "EmbaasBlobLoader", ] diff --git a/langchain/document_loaders/embaas.py b/langchain/document_loaders/embaas.py new file mode 100644 index 0000000000000..5dc4071e86e0a --- /dev/null +++ b/langchain/document_loaders/embaas.py @@ -0,0 +1,234 @@ +import base64 +import warnings +from typing import Any, Dict, Iterator, List, Optional + +import requests +from pydantic import BaseModel, root_validator, validator +from typing_extensions import NotRequired, TypedDict + +from langchain.docstore.document import Document +from langchain.document_loaders.base import BaseBlobParser, BaseLoader +from langchain.document_loaders.blob_loaders import Blob +from langchain.text_splitter import TextSplitter +from langchain.utils import get_from_dict_or_env + +EMBAAS_DOC_API_URL = "https://api.embaas.io/v1/document/extract-text/bytes/" + + +class EmbaasDocumentExtractionParameters(TypedDict): + """Parameters for the embaas document extraction API.""" + + mime_type: NotRequired[str] + """The mime type of the document.""" + file_extension: NotRequired[str] + """The file extension of the document.""" + file_name: NotRequired[str] + """The file name of the document.""" + + should_chunk: NotRequired[bool] + """Whether to chunk the document into pages.""" + chunk_size: NotRequired[int] + """The maximum size of the text chunks.""" + chunk_overlap: NotRequired[int] + """The maximum overlap allowed between chunks.""" + chunk_splitter: NotRequired[str] + """The text splitter class name for creating chunks.""" + separators: NotRequired[List[str]] + 
"""The separators for chunks.""" + + should_embed: NotRequired[bool] + """Whether to create embeddings for the document in the response.""" + model: NotRequired[str] + """The model to pass to the Embaas document extraction API.""" + instruction: NotRequired[str] + """The instruction to pass to the Embaas document extraction API.""" + + +class EmbaasDocumentExtractionPayload(EmbaasDocumentExtractionParameters): + bytes: str + """The base64 encoded bytes of the document to extract text from.""" + + +class BaseEmbaasLoader(BaseModel): + embaas_api_key: Optional[str] = None + api_url: str = EMBAAS_DOC_API_URL + """The URL of the embaas document extraction API.""" + params: EmbaasDocumentExtractionParameters = EmbaasDocumentExtractionParameters() + """Additional parameters to pass to the embaas document extraction API.""" + + @root_validator(pre=True) + def validate_environment(cls, values: Dict) -> Dict: + """Validate that api key and python package exists in environment.""" + embaas_api_key = get_from_dict_or_env( + values, "embaas_api_key", "EMBAAS_API_KEY" + ) + values["embaas_api_key"] = embaas_api_key + return values + + +class EmbaasBlobLoader(BaseEmbaasLoader, BaseBlobParser): + """Wrapper around embaas's document byte loader service. + + To use, you should have the + environment variable ``EMBAAS_API_KEY`` set with your API key, or pass + it as a named parameter to the constructor. + + Example: + .. code-block:: python + + # Default parsing + from langchain.document_loaders.embaas import EmbaasBlobLoader + loader = EmbaasBlobLoader() + blob = Blob.from_path(path="example.mp3") + documents = loader.parse(blob=blob) + + # Custom api parameters (create embeddings automatically) + from langchain.document_loaders.embaas import EmbaasBlobLoader + loader = EmbaasBlobLoader( + params={ + "should_embed": True, + "model": "e5-large-v2", + "chunk_size": 256, + "chunk_splitter": "CharacterTextSplitter" + } + ) + blob = Blob.from_path(path="example.pdf") + documents = loader.parse(blob=blob) + """ + + def lazy_parse(self, blob: Blob) -> Iterator[Document]: + yield from self._get_documents(blob=blob) + + @staticmethod + def _api_response_to_documents(chunks: List[Dict[str, Any]]) -> List[Document]: + """Convert the API response to a list of documents.""" + docs = [] + for chunk in chunks: + metadata = chunk["metadata"] + if chunk.get("embedding", None) is not None: + metadata["embedding"] = chunk["embedding"] + doc = Document(page_content=chunk["text"], metadata=metadata) + docs.append(doc) + + return docs + + def _generate_payload(self, blob: Blob) -> EmbaasDocumentExtractionPayload: + """Generates payload for the API request.""" + base64_byte_str = base64.b64encode(blob.as_bytes()).decode() + payload: EmbaasDocumentExtractionPayload = EmbaasDocumentExtractionPayload( + bytes=base64_byte_str, + # Workaround for mypy issue: https://github.com/python/mypy/issues/9408 + # type: ignore + **self.params, + ) + + if blob.mimetype is not None and payload.get("mime_type", None) is None: + payload["mime_type"] = blob.mimetype + + return payload + + def _handle_request( + self, payload: EmbaasDocumentExtractionPayload + ) -> List[Document]: + """Sends a request to the embaas API and handles the response.""" + headers = { + "Authorization": f"Bearer {self.embaas_api_key}", + "Content-Type": "application/json", + } + + response = requests.post(self.api_url, headers=headers, json=payload) + response.raise_for_status() + + parsed_response = response.json() + return EmbaasBlobLoader._api_response_to_documents( + 
+            chunks=parsed_response["data"]["chunks"]
+        )
+
+    def _get_documents(self, blob: Blob) -> Iterator[Document]:
+        """Get the documents from the blob."""
+        payload = self._generate_payload(blob=blob)
+
+        try:
+            documents = self._handle_request(payload=payload)
+        except requests.exceptions.RequestException as e:
+            if e.response is None or not e.response.text:
+                raise ValueError(
+                    f"Error raised by embaas document text extraction API: {e}"
+                )
+
+            parsed_response = e.response.json()
+            if "message" in parsed_response:
+                raise ValueError(
+                    f"Validation Error raised by embaas document text extraction API:"
+                    f" {parsed_response['message']}"
+                )
+            raise
+
+        yield from documents
+
+
+class EmbaasLoader(BaseEmbaasLoader, BaseLoader):
+    """Wrapper around embaas's document loader service.
+
+    To use, you should have the
+    environment variable ``EMBAAS_API_KEY`` set with your API key, or pass
+    it as a named parameter to the constructor.
+
+    Example:
+        .. code-block:: python
+
+            # Default parsing
+            from langchain.document_loaders.embaas import EmbaasLoader
+            loader = EmbaasLoader(file_path="example.mp3")
+            documents = loader.load()
+
+            # Custom api parameters (create embeddings automatically)
+            from langchain.document_loaders.embaas import EmbaasLoader
+            loader = EmbaasLoader(
+                file_path="example.pdf",
+                params={
+                    "should_embed": True,
+                    "model": "e5-large-v2",
+                    "chunk_size": 256,
+                    "chunk_splitter": "CharacterTextSplitter"
+                }
+            )
+            documents = loader.load()
+    """
+
+    file_path: str
+    """The path to the file to load."""
+    blob_loader: Optional[EmbaasBlobLoader]
+    """The blob loader to use. If not provided, a default one will be created."""
+
+    @validator("blob_loader", always=True)
+    def validate_blob_loader(
+        cls, v: EmbaasBlobLoader, values: Dict
+    ) -> EmbaasBlobLoader:
+        return v or EmbaasBlobLoader(
+            embaas_api_key=values["embaas_api_key"],
+            api_url=values["api_url"],
+            params=values["params"],
+        )
+
+    def lazy_load(self) -> Iterator[Document]:
+        """Load the documents from the file path lazily."""
+        blob = Blob.from_path(path=self.file_path)
+
+        assert self.blob_loader is not None
+        # Should never be None, but mypy doesn't know that.
+        yield from self.blob_loader.lazy_parse(blob=blob)
+
+    def load(self) -> List[Document]:
+        return list(self.lazy_load())
+
+    def load_and_split(
+        self, text_splitter: Optional[TextSplitter] = None
+    ) -> List[Document]:
+        if self.params.get("should_embed", False):
+            warnings.warn(
+                "Embeddings are not supported with load_and_split."
+                " Use the API splitter to properly generate embeddings."
+                " For more information see embaas.io docs."
+            )
+        return super().load_and_split(text_splitter=text_splitter)
diff --git a/langchain/embeddings/embaas.py b/langchain/embeddings/embaas.py
index 8a9134f711712..e0a42e46d573e 100644
--- a/langchain/embeddings/embaas.py
+++ b/langchain/embeddings/embaas.py
@@ -32,17 +32,16 @@ class EmbaasEmbeddings(BaseModel, Embeddings):
         .. code-block:: python
 
             # Initialise with default model and instruction
-            from langchain.llms import EmbaasEmbeddings
+            from langchain.embeddings import EmbaasEmbeddings
             emb = EmbaasEmbeddings()
 
             # Initialise with custom model and instruction
-            from langchain.llms import EmbaasEmbeddings
+            from langchain.embeddings import EmbaasEmbeddings
             emb_model = "instructor-large"
             emb_inst = "Represent the Wikipedia document for retrieval"
             emb = EmbaasEmbeddings(
                 model=emb_model,
-                instruction=emb_inst,
-                embaas_api_key="your-api-key"
+                instruction=emb_inst
             )
     """
diff --git a/tests/integration_tests/document_loaders/test_embaas.py b/tests/integration_tests/document_loaders/test_embaas.py
new file mode 100644
index 0000000000000..2170a143c66ac
--- /dev/null
+++ b/tests/integration_tests/document_loaders/test_embaas.py
@@ -0,0 +1,59 @@
+from typing import Any
+from unittest.mock import MagicMock, patch
+
+import pytest
+import responses
+
+from langchain.document_loaders import EmbaasBlobLoader, EmbaasLoader
+from langchain.document_loaders.blob_loaders import Blob
+from langchain.document_loaders.embaas import EMBAAS_DOC_API_URL
+
+
+@responses.activate
+def test_handle_request() -> None:
+    responses.add(
+        responses.POST,
+        EMBAAS_DOC_API_URL,
+        json={
+            "data": {
+                "chunks": [
+                    {
+                        "text": "Hello",
+                        "metadata": {"start_page": 1, "end_page": 2},
+                        "embedding": [0.0],
+                    }
+                ]
+            }
+        },
+        status=200,
+    )
+
+    loader = EmbaasBlobLoader(embaas_api_key="api_key", params={"should_embed": True})
+    documents = loader.parse(blob=Blob.from_data(data="Hello"))
+    assert len(documents) == 1
+    assert documents[0].page_content == "Hello"
+    assert documents[0].metadata["start_page"] == 1
+    assert documents[0].metadata["end_page"] == 2
+    assert documents[0].metadata["embedding"] == [0.0]
+
+
+@responses.activate
+def test_handle_request_exception() -> None:
+    responses.add(
+        responses.POST,
+        EMBAAS_DOC_API_URL,
+        json={"message": "Invalid request"},
+        status=400,
+    )
+    loader = EmbaasBlobLoader(embaas_api_key="api_key")
+    with pytest.raises(ValueError) as excinfo:
+        loader.parse(blob=Blob.from_data(data="Hello"))
+    assert "Invalid request" in str(excinfo.value)
+
+
+@patch.object(EmbaasBlobLoader, "_handle_request")
+def test_load(mock_handle_request: Any) -> None:
+    mock_handle_request.return_value = [MagicMock()]
+    loader = EmbaasLoader(file_path="test_embaas.py", embaas_api_key="api_key")
+    documents = loader.load()
+    assert len(documents) == 1