 from redis.cluster import RedisCluster
 from redis.cluster import ClusterNode
 from .proxy import Proxy
-from .seeder import SeederBase
-from .seeder import StaticSeeder
+from .seeder import Seeder, SeederBase, StaticSeeder

 from . import dfly_args

@@ -33,6 +32,11 @@ def monotonically_increasing_port_number():
 next_port = monotonically_increasing_port_number()


+async def get_memory(client, field):
+    info = await client.info("memory")
+    return info[field]
+
+
 class RedisClusterNode:
     def __init__(self, port):
         self.port = port
@@ -1981,6 +1985,7 @@ async def node1size0():

 @dfly_args({"proactor_threads": 2, "cluster_mode": "yes"})
 @pytest.mark.asyncio
+@pytest.mark.opt_only
 async def test_cluster_migration_huge_container(df_factory: DflyInstanceFactory):
     instances = [
         df_factory.create(port=next(next_port), admin_port=next(next_port)) for i in range(2)
@@ -1995,7 +2000,7 @@ async def test_cluster_migration_huge_container(df_factory: DflyInstanceFactory)

     logging.debug("Generating huge containers")
     seeder = StaticSeeder(
-        key_target=10,
+        key_target=100,
         data_size=10_000_000,
         collection_size=10_000,
         variance=1,
@@ -2005,6 +2010,8 @@ async def test_cluster_migration_huge_container(df_factory: DflyInstanceFactory)
     await seeder.run(nodes[0].client)
     source_data = await StaticSeeder.capture(nodes[0].client)

+    mem_before = await get_memory(nodes[0].client, "used_memory_rss")
+
     nodes[0].migrations = [
         MigrationInfo("127.0.0.1", instances[1].admin_port, [(0, 16383)], nodes[1].id)
     ]
@@ -2017,6 +2024,74 @@ async def test_cluster_migration_huge_container(df_factory: DflyInstanceFactory)
     target_data = await StaticSeeder.capture(nodes[1].client)
     assert source_data == target_data

+    # Get peak memory, because migration removes the data
+    mem_after = await get_memory(nodes[0].client, "used_memory_peak_rss")
+    logging.debug(f"Memory before {mem_before} after {mem_after}")
+    assert mem_after < mem_before * 1.1
+
+
+@dfly_args({"proactor_threads": 2, "cluster_mode": "yes"})
+@pytest.mark.parametrize("chunk_size", [1_000_000, 30])
+@pytest.mark.asyncio
+async def test_cluster_migration_while_seeding(
+    df_factory: DflyInstanceFactory, df_seeder_factory: DflySeederFactory, chunk_size
+):
+    instances = [
+        df_factory.create(
+            port=next(next_port),
+            admin_port=next(next_port),
+            serialization_max_chunk_size=chunk_size,
+        )
+        for _ in range(2)
+    ]
+    df_factory.start_all(instances)
+
+    nodes = [await create_node_info(instance) for instance in instances]
+    nodes[0].slots = [(0, 16383)]
+    nodes[1].slots = []
+    client0 = nodes[0].client
+    client1 = nodes[1].client
+
+    await push_config(json.dumps(generate_config(nodes)), [node.admin_client for node in nodes])
+
+    logging.debug("Seeding cluster")
+    seeder = df_seeder_factory.create(
+        keys=10_000, port=instances[0].port, cluster_mode=True, mirror_to_fake_redis=True
+    )
+    await seeder.run(target_deviation=0.1)
+
+    seed = asyncio.create_task(seeder.run())
+    await asyncio.sleep(1)
+
+    nodes[0].migrations = [
+        MigrationInfo("127.0.0.1", instances[1].admin_port, [(0, 16383)], nodes[1].id)
+    ]
+    logging.debug("Migrating slots")
+    await push_config(json.dumps(generate_config(nodes)), [node.admin_client for node in nodes])
+
+    logging.debug("Waiting for migration to finish")
+    await wait_for_status(nodes[0].admin_client, nodes[1].id, "FINISHED", timeout=300)
+    logging.debug("Migration finished")
+
+    logging.debug("Finalizing migration")
+    nodes[0].slots = []
+    nodes[1].slots = [(0, 16383)]
+    await push_config(json.dumps(generate_config(nodes)), [node.admin_client for node in nodes])
+
+    await asyncio.sleep(1)  # Let the seeder write to the destination node for a while before stopping
+
+    seeder.stop()
+    await seed
+    logging.debug("Seeding finished")
+
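+    # The source node's peak RSS must stay within 10% of its current RSS,
+    # i.e. serializing entries during migration did not cause a memory spike.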
+    assert (
+        await get_memory(client0, "used_memory_peak_rss")
+        < await get_memory(client0, "used_memory_rss") * 1.1
+    )
+
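+    # The seeder mirrored all writes to a fake Redis; its capture must match the migration target.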
+    capture = await seeder.capture_fake_redis()
+    assert await seeder.compare(capture, instances[1].port)
+

 def parse_lag(replication_info: str):
     lags = re.findall("lag=([0-9]+)\r\n", replication_info)