
Commit 6252d1c

Test config on every shard
1 parent 8075f55 commit 6252d1c
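
This commit changes test_ai_config so that the AI.CONFIG checks run against every master shard instead of a single connection: AI.CONFIG is a keyless, shard-local command, so in a cluster each master has to be configured and verified on its own. A minimal sketch of the pattern, using the RLTest Env names that appear in the diff below (the helper name and chunk size are illustrative, not part of the commit):

    # Per-shard configuration pattern (RLTest-style Env assumed).
    def set_chunk_size_on_all_shards(env, chunk_size):
        conns = env.getOSSMasterNodesConnectionList()  # one connection per master shard
        if env.isCluster():
            env.assertEqual(len(conns), env.shardsCount)  # sanity: one connection per shard
        for con in conns:
            # AI.CONFIG only changes the shard that receives it, so apply and verify on each.
            env.assertEqual(con.execute_command('AI.CONFIG', 'MODEL_CHUNK_SIZE', chunk_size), b'OK')
            env.assertEqual(con.execute_command('AI.CONFIG', 'GET', 'MODEL_CHUNK_SIZE'), chunk_size)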

File tree

1 file changed (+34, -26 lines)

tests/flow/tests_commands.py

Lines changed: 34 additions & 26 deletions
@@ -542,29 +542,35 @@ def test_ai_config(env):
     if not TEST_PT:
         env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True)
         return
-    con = get_connection(env, '{1}')
+
+    conns = env.getOSSMasterNodesConnectionList()
+    if env.isCluster():
+        env.assertEqual(len(conns), env.shardsCount)
+
     model = load_file_content('pt-minimal.pt')
 
-    # Get the default configs.
-    res = con.execute_command('AI.CONFIG', 'GET', 'BACKENDSPATH')
-    env.assertEqual(res, None)
-    res = con.execute_command('AI.CONFIG', 'GET', 'MODEL_CHUNK_SIZE')
-    env.assertEqual(res, 511*1024*1024)
-
-    # Change the default backends path and load backend.
-    con.execute_command('AI.CONFIG', 'BACKENDSPATH', ROOT+"/install-cpu")
-    res = con.execute_command('AI.CONFIG', 'GET', 'BACKENDSPATH')
-    env.assertEqual(res, f'{ROOT}/install-cpu'.encode())
-    be_info = get_info_section(con, "backends_info")
-    env.assertEqual(len(be_info), 0) # no backends are loaded.
-    check_error_message(env, con, 'error loading backend', 'AI.CONFIG', 'LOADBACKEND', 'TORCH', ".")
-
-    res = con.execute_command('AI.CONFIG', 'LOADBACKEND', 'TORCH', "backends/redisai_torch/redisai_torch.so")
-    env.assertEqual(res, b'OK')
-    be_info = get_info_section(con, "backends_info")
-    env.assertEqual(len(be_info), 1) # one backend is loaded now - torch.
-
-    # Set the same model twice - with and without chunks, and assert equality.
+    for con in conns:
+        # Get the default configs.
+        res = con.execute_command('AI.CONFIG', 'GET', 'BACKENDSPATH')
+        env.assertEqual(res, None)
+        res = con.execute_command('AI.CONFIG', 'GET', 'MODEL_CHUNK_SIZE')
+        env.assertEqual(res, 511*1024*1024)
+
+        # Change the default backends path and load backend.
+        con.execute_command('AI.CONFIG', 'BACKENDSPATH', ROOT+"/install-cpu")
+        res = con.execute_command('AI.CONFIG', 'GET', 'BACKENDSPATH')
+        env.assertEqual(res, f'{ROOT}/install-cpu'.encode())
+        be_info = get_info_section(con, "backends_info")
+        env.assertEqual(len(be_info), 0) # no backends are loaded.
+        check_error_message(env, con, 'error loading backend', 'AI.CONFIG', 'LOADBACKEND', 'TORCH', ".")
+
+        res = con.execute_command('AI.CONFIG', 'LOADBACKEND', 'TORCH', "backends/redisai_torch/redisai_torch.so")
+        env.assertEqual(res, b'OK')
+        be_info = get_info_section(con, "backends_info")
+        env.assertEqual(len(be_info), 1) # one backend is loaded now - torch.
+
+    # Set the same model twice on some shard - with and without chunks, and assert equality.
+    con = get_connection(env, '{1}')
     chunk_size = len(model) // 3
     model_chunks = [model[i:i + chunk_size] for i in range(0, len(model), chunk_size)]
     con.execute_command('AI.MODELSTORE', 'm1{1}', 'TORCH', DEVICE, 'BLOB', model)
@@ -573,13 +579,15 @@ def test_ai_config(env):
     model2 = con.execute_command('AI.MODELGET', 'm2{1}', 'BLOB')
     env.assertEqual(model1, model2)
 
-    # Change the default model_chunk_size.
-    ret = con.execute_command('AI.CONFIG', 'MODEL_CHUNK_SIZE', chunk_size)
-    env.assertEqual(ret, b'OK')
-    res = con.execute_command('AI.CONFIG', 'GET', 'MODEL_CHUNK_SIZE')
-    env.assertEqual(res, chunk_size)
+    for con in conns:
+        # Change the default model_chunk_size.
+        ret = con.execute_command('AI.CONFIG', 'MODEL_CHUNK_SIZE', chunk_size)
+        env.assertEqual(ret, b'OK')
+        res = con.execute_command('AI.CONFIG', 'GET', 'MODEL_CHUNK_SIZE')
+        env.assertEqual(res, chunk_size)
 
     # Verify that AI.MODELGET returns the model's blob in chunks, with or without the META arg.
+    con = get_connection(env, '{1}')
     model2 = con.execute_command('AI.MODELGET', 'm1{1}', 'BLOB')
     env.assertEqual(len(model2), len(model_chunks))
     env.assertTrue(all([el1 == el2 for el1, el2 in zip(model2, model_chunks)]))
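
Worth noting: the model commands still go through a single connection from get_connection(env, '{1}') because the keys m1{1} and m2{1} share the {1} hash tag, which maps them to the same cluster slot and therefore the same shard; only the keyless AI.CONFIG calls need the per-shard loop. A quick sketch of the slot claim, assuming redis-py's redis.crc.key_slot helper (an internal utility of its cluster client):

    # Only the text inside '{...}' is hashed when computing the cluster slot,
    # so keys that share a hash tag always land on the same shard.
    from redis.crc import key_slot  # assumption: redis-py's CRC16 slot helper

    assert key_slot(b'm1{1}') == key_slot(b'm2{1}') == key_slot(b'{1}')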
