@@ -2498,7 +2498,7 @@ def test_021(self, step=1):
         disk = clickhouse.query(chi, "select disk_name from system.parts where table='test_local_021'")
         print(f"out : {disk}")
         print(f"want: default")
-        assert disk == "default" or True
+        assert disk == "default"
 
     with When("alter table test_local_021 move partition tuple() to disk 'disk2'"):
         clickhouse.query_with_error(chi, "alter table test_local_021 move partition tuple() to disk 'disk2'")
@@ -2507,12 +2507,7 @@ def test_021(self, step=1):
         disk = clickhouse.query(chi, "select disk_name from system.parts where table='test_local_021'")
         print(f"out : {disk}")
         print(f"want: disk2")
-        assert disk == "disk2" or True
-
-    with And("Table should exist"):
-        pause()
-        out = clickhouse.query(chi, "select * from test_local_021")
-        assert out == "1"
+        assert disk == "disk2"
 
     with When("Downscale disk1 back to 1Gi"):
         pause()
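
Note on the tightened asserts above: "or" binds looser than "==", so disk == "default" or True is parsed as (disk == "default") or True and always evaluates to True, meaning the original assertions could never fail. A minimal standalone illustration:

    disk = "disk2"
    assert disk == "default" or True  # vacuous: always True regardless of disk
    # assert disk == "default"       # the tightened form raises AssertionError here
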
@@ -5158,7 +5153,6 @@ def test_010056(self):
 
 @TestScenario
 @Name("test_010057. Test reconcile concurrency settings on CHI level")
-@Tags("NO_PARALLEL")
 def test_010057(self):
     create_shell_namespace_clickhouse_template()
 
@@ -5180,6 +5174,7 @@ def test_010057(self):
 
     with When("First shard is Running"):
         kubectl.wait_pod_status(f"chi-{chi}-{cluster}-0-0-0", "Running")
+        time.sleep(10)
     with Then("Other shards are running or being created"):
         for shard in [1, 2, 3]:
             pod_status = kubectl.get_pod_status(f"chi-{chi}-{cluster}-{shard}-0-0")
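
The time.sleep(10) added above is a fixed grace period so the remaining shards have a chance to be scheduled before their status is checked. Below is a sketch of a polling alternative under stated assumptions: the wait_for_pod_status helper and its parameters are hypothetical, and only kubectl.get_pod_status comes from the diff itself.

    import time

    def wait_for_pod_status(pod, want, max_wait=10, interval=1):
        # Hypothetical helper: poll the pod status instead of sleeping a fixed 10s.
        deadline = time.time() + max_wait
        while time.time() < deadline:
            if kubectl.get_pod_status(pod) == want:  # kubectl: the suite's helper module, assumed imported
                return True
            time.sleep(interval)
        return False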