*: Rename "default" cluster to "quickstart" #24019

Merged

Changes from 9 commits
4 changes: 2 additions & 2 deletions doc/developer/platform-checks.md
@@ -151,8 +151,8 @@ class DropCreateDefaultReplica(Action):
def execute(self, c: Composition) -> None:
c.sql(
"""
- ALTER CLUSTER default SET (REPLICATION FACTOR 0);
- ALTER CLUSTER default SET (SIZE '1', REPLICATION FACTOR 1);
+ ALTER CLUSTER quickstart SET (REPLICATION FACTOR 0);
+ ALTER CLUSTER quickstart SET (SIZE '1', REPLICATION FACTOR 1);
"""
)
```
2 changes: 1 addition & 1 deletion misc/dbt-materialize/README.md
@@ -39,7 +39,7 @@ dbt-materialize:
pass: [password]
dbname: [database]
schema: [dbt schema]
- cluster: [cluster] # default 'default'
+ cluster: [cluster] # default 'quickstart'
sslmode: require
keepalives_idle: 0 # default 0
retries: 1 # default 1 retry on error/timeout when opening connections
@@ -66,7 +66,7 @@ def connect(**kwargs):

@dataclass
class MaterializeCredentials(PostgresCredentials):
- cluster: Optional[str] = "default"
+ cluster: Optional[str] = "quickstart"

@property
def type(self):
@@ -40,4 +40,4 @@ prompts:
default: 'public'
cluster:
hint: 'dev cluster'
- default: 'default'
+ default: 'quickstart'
4 changes: 2 additions & 2 deletions misc/dbt-materialize/mzcompose.py
@@ -55,8 +55,8 @@ def workflow_default(c: Composition, parser: WorkflowArgumentParser) -> None:
options=test_case.materialized_options,
image=test_case.materialized_image,
volumes_extra=["secrets:/secrets"],
- # Disable RBAC checks because of error on "DROP CLUSTER default CASCADE":
- # InsufficientPrivilege: must be owner of CLUSTER default
+ # Disable RBAC checks because of error on "DROP CLUSTER quickstart CASCADE":
+ # InsufficientPrivilege: must be owner of CLUSTER quickstart
# TODO: Can dbt connect using mz_system user instead of materialize?
additional_system_parameter_defaults={
"enable_rbac_checks": "false",
8 changes: 4 additions & 4 deletions misc/dbt-materialize/tests/adapter/test_clusters.py
@@ -64,7 +64,7 @@
models_expected_clusters = """
materialized_view_name,cluster_name,index_name,index_cluster_name
override_cluster,not_default,,
- override_index_cluster,default,i_col_1_idx,not_default
+ override_index_cluster,quickstart,i_col_1_idx,not_default
override_cluster_and_index,not_default,c_i_col_1_idx,not_default
""".lstrip()

@@ -111,13 +111,13 @@ def test_materialize_override_noexist(self, project):
# not error if a user-provided cluster is specified as a connection or
# model config, but will error otherwise.
# See #17197: https://github.com/MaterializeInc/materialize/pull/17197
- def test_materialize_drop_default(self, project):
- project.run_sql("DROP CLUSTER default CASCADE")
+ def test_materialize_drop_quickstart(self, project):
+ project.run_sql("DROP CLUSTER quickstart CASCADE")

run_dbt(["run", "--models", "override_cluster"], expect_pass=True)
run_dbt(["run", "--models", "default_cluster"], expect_pass=False)

project.run_sql("CREATE CLUSTER default SIZE = '1'")
project.run_sql("CREATE CLUSTER quickstart SIZE = '1'")


class TestProjectConfigCluster:
8 changes: 4 additions & 4 deletions misc/dbt-materialize/tests/adapter/test_constraints.py
@@ -153,12 +153,12 @@ def models(self):
"contract_invalid_cluster.sql": test_view,
}

- # In the absence of the pre-installed `default` cluster, Materialize should
+ # In the absence of the pre-installed `quickstart` cluster, Materialize should
# not error if data contracts are enforced.
# See #23600: https://github.com/MaterializeInc/materialize/issues/23600
- def test_materialize_drop_default(self, project):
- project.run_sql("DROP CLUSTER default CASCADE")
+ def test_materialize_drop_quickstart(self, project):
+ project.run_sql("DROP CLUSTER quickstart CASCADE")

run_dbt(["run", "--models", "contract_invalid_cluster"], expect_pass=True)

project.run_sql("CREATE CLUSTER default SIZE = '1'")
project.run_sql("CREATE CLUSTER quickstart SIZE = '1'")
6 changes: 3 additions & 3 deletions misc/python/materialize/checks/all_checks/identifiers.py
@@ -97,7 +97,7 @@ def initialize(self) -> Testdrive:
> CREATE TYPE {dq(self.ident["type"])} AS LIST (ELEMENT TYPE = text);
> CREATE TABLE {dq(self.ident["schema"])}.{dq(self.ident["table"])} ({dq(self.ident["column"])} TEXT, c2 {dq(self.ident["type"])});
> INSERT INTO {dq(self.ident["schema"])}.{dq(self.ident["table"])} VALUES ({sq(self.ident["value1"])}, LIST[{sq(self.ident["value2"])}]::{dq(self.ident["type"])});
- > CREATE MATERIALIZED VIEW {dq(self.ident["schema"])}.{dq(self.ident["mv0"])} IN CLUSTER default AS
+ > CREATE MATERIALIZED VIEW {dq(self.ident["schema"])}.{dq(self.ident["mv0"])} IN CLUSTER {self._default_cluster()} AS
SELECT COUNT({dq(self.ident["column"])}) FROM {dq(self.ident["schema"])}.{dq(self.ident["table"])};

$ kafka-create-topic topic=sink-source-ident
@@ -112,7 +112,7 @@ def initialize(self) -> Testdrive:
FROM KAFKA CONNECTION {dq(self.ident["kafka_conn"])} (TOPIC 'testdrive-sink-source-ident-${{testdrive.seed}}')
FORMAT AVRO USING CONFLUENT SCHEMA REGISTRY CONNECTION {dq(self.ident["csr_conn"])}
ENVELOPE UPSERT;
- > CREATE MATERIALIZED VIEW {dq(self.ident["source_view"])} IN CLUSTER default AS
+ > CREATE MATERIALIZED VIEW {dq(self.ident["source_view"])} IN CLUSTER {self._default_cluster()} AS
SELECT LEFT(key1, 2) as l_k, LEFT(f1, 1) AS l_v, COUNT(*) AS c FROM {dq(self.ident["source"])} GROUP BY LEFT(key1, 2), LEFT(f1, 1);
> CREATE SINK {dq(self.ident["schema"])}.{dq(self.ident["sink0"])} FROM {dq(self.ident["source_view"])}
INTO KAFKA CONNECTION {dq(self.ident["kafka_conn"])} (TOPIC 'sink-sink-ident0')
@@ -137,7 +137,7 @@ def manipulate(self) -> list[Testdrive]:
f"""
> SET CLUSTER=identifiers;
> SET DATABASE={dq(self.ident["db"])};
- > CREATE MATERIALIZED VIEW {dq(self.ident["schema"])}.{dq(self.ident["mv" + i])} IN CLUSTER default AS
+ > CREATE MATERIALIZED VIEW {dq(self.ident["schema"])}.{dq(self.ident["mv" + i])} IN CLUSTER {self._default_cluster()} AS
SELECT {dq(self.ident["column"])}, c2 as {dq(self.ident["alias"])} FROM {dq(self.ident["schema"])}.{dq(self.ident["table"])};
> INSERT INTO {dq(self.ident["schema"])}.{dq(self.ident["table"])} VALUES ({sq(self.ident["value1"])}, LIST[{sq(self.ident["value2"])}]::{dq(self.ident["type"])});
> CREATE SINK {dq(self.ident["schema"])}.{dq(self.ident["sink" + i])} FROM {dq(self.ident["source_view"])}
4 changes: 2 additions & 2 deletions misc/python/materialize/checks/all_checks/null_value.py
@@ -51,7 +51,7 @@ def manipulate(self) -> list[Testdrive]:
def validate(self) -> Testdrive:
return Testdrive(
dedent(
"""
f"""
> SHOW CREATE VIEW null_value_view1;
materialize.public.null_value_view1 "CREATE VIEW \\"materialize\\".\\"public\\".\\"null_value_view1\\" AS SELECT \\"f1\\", \\"f2\\", NULL FROM \\"materialize\\".\\"public\\".\\"null_value_table\\" WHERE \\"f1\\" IS NULL OR \\"f1\\" IS NOT NULL OR \\"f1\\" = NULL"

@@ -70,7 +70,7 @@ def validate(self) -> Testdrive:
<null> <null> <null>

> SHOW CREATE MATERIALIZED VIEW null_value_view2;
- materialize.public.null_value_view2 "CREATE MATERIALIZED VIEW \\"materialize\\".\\"public\\".\\"null_value_view2\\" IN CLUSTER \\"default\\" AS SELECT \\"f1\\", \\"f2\\", NULL FROM \\"materialize\\".\\"public\\".\\"null_value_table\\" WHERE \\"f1\\" IS NULL OR \\"f1\\" IS NOT NULL OR \\"f1\\" = NULL"
+ materialize.public.null_value_view2 "CREATE MATERIALIZED VIEW \\"materialize\\".\\"public\\".\\"null_value_view2\\" IN CLUSTER \\"{self._default_cluster()}\\" AS SELECT \\"f1\\", \\"f2\\", NULL FROM \\"materialize\\".\\"public\\".\\"null_value_table\\" WHERE \\"f1\\" IS NULL OR \\"f1\\" IS NOT NULL OR \\"f1\\" = NULL"

> SELECT * FROM null_value_view2;
<null> <null> <null>
2 changes: 1 addition & 1 deletion misc/python/materialize/checks/all_checks/owners.py
@@ -21,7 +21,7 @@ def _create_objects(self, role: str, i: int, expensive: bool = False) -> str:
$[version>=5200] postgres-execute connection=postgres://mz_system@${{testdrive.materialize-internal-sql-addr}}
GRANT CREATE ON DATABASE materialize TO {role}
GRANT CREATE ON SCHEMA materialize.public TO {role}
- GRANT CREATE ON CLUSTER default TO {role}
+ GRANT CREATE ON CLUSTER {self._default_cluster()} TO {role}
$[version>=5900] postgres-execute connection=postgres://mz_system@${{testdrive.materialize-internal-sql-addr}}
GRANT CREATEDB ON SYSTEM TO {role}
$[version<5900] postgres-execute connection=postgres://mz_system@${{testdrive.materialize-internal-sql-addr}}
6 changes: 3 additions & 3 deletions misc/python/materialize/checks/all_checks/rename_index.py
@@ -56,10 +56,10 @@ def manipulate(self) -> list[Testdrive]:
def validate(self) -> Testdrive:
return Testdrive(
dedent(
"""
f"""
> SHOW INDEXES ON rename_index_table;
- rename_index_index2 rename_index_table default {f2}
- rename_index_index3 rename_index_table default {f2}
+ rename_index_index2 rename_index_table {self._default_cluster()} {{f2}}
+ rename_index_index3 rename_index_table {self._default_cluster()} {{f2}}

> SELECT * FROM rename_index_view1;
1
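A side effect of the rename is visible in the expected output above: because the Testdrive script now goes through `f""" ... """` so that `{self._default_cluster()}` can be interpolated, literal braces in the expected output (the `{f2}` index key) must be doubled to `{{f2}}`. A standalone sketch of that plain-Python behavior, independent of the test framework:

```python
cluster = "quickstart"

# Plain triple-quoted string: braces are ordinary characters.
plain = """rename_index_index2 rename_index_table default {f2}"""

# f-string: a single brace starts a replacement field, so literal braces are
# written doubled while the cluster name is interpolated.
formatted = f"""rename_index_index2 rename_index_table {cluster} {{f2}}"""

assert plain.endswith("{f2}")
assert formatted == "rename_index_index2 rename_index_table quickstart {f2}"
```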
@@ -51,9 +51,9 @@ def manipulate(self) -> list[Testdrive]:
def validate(self) -> Testdrive:
return Testdrive(
dedent(
"""
f"""
> SHOW CREATE MATERIALIZED VIEW string_bytea_types_view1;
- materialize.public.string_bytea_types_view1 "CREATE MATERIALIZED VIEW \\"materialize\\".\\"public\\".\\"string_bytea_types_view1\\" IN CLUSTER \\"default\\" AS SELECT \\"text_col\\", \\"bytea_col\\", 'това'::\\"pg_catalog\\".\\"text\\", '\\\\xAAAA'::\\"pg_catalog\\".\\"bytea\\" FROM \\"materialize\\".\\"public\\".\\"text_bytea_types_table\\" WHERE \\"text_col\\" >= ''::\\"pg_catalog\\".\\"text\\" AND \\"bytea_col\\" >= ''::\\"pg_catalog\\".\\"bytea\\""
+ materialize.public.string_bytea_types_view1 "CREATE MATERIALIZED VIEW \\"materialize\\".\\"public\\".\\"string_bytea_types_view1\\" IN CLUSTER \\"{self._default_cluster()}\\" AS SELECT \\"text_col\\", \\"bytea_col\\", 'това'::\\"pg_catalog\\".\\"text\\", '\\\\xAAAA'::\\"pg_catalog\\".\\"bytea\\" FROM \\"materialize\\".\\"public\\".\\"text_bytea_types_table\\" WHERE \\"text_col\\" >= ''::\\"pg_catalog\\".\\"text\\" AND \\"bytea_col\\" >= ''::\\"pg_catalog\\".\\"bytea\\""

> SELECT text_col, text, LENGTH(bytea_col), LENGTH(bytea) FROM string_bytea_types_view1;
aaaa това 2 2
18 changes: 9 additions & 9 deletions misc/python/materialize/checks/all_checks/top_k.py
@@ -58,16 +58,16 @@ def manipulate(self) -> list[Testdrive]:
def validate(self) -> Testdrive:
return Testdrive(
dedent(
"""
f"""
> SHOW CREATE MATERIALIZED VIEW basic_topk_view1;
- materialize.public.basic_topk_view1 "CREATE MATERIALIZED VIEW \\"materialize\\".\\"public\\".\\"basic_topk_view1\\" IN CLUSTER \\"default\\" AS SELECT \\"f1\\", \\"pg_catalog\\".\\"count\\"(\\"f1\\") FROM \\"materialize\\".\\"public\\".\\"basic_topk_table\\" GROUP BY \\"f1\\" ORDER BY \\"f1\\" DESC NULLS LAST LIMIT 2"
+ materialize.public.basic_topk_view1 "CREATE MATERIALIZED VIEW \\"materialize\\".\\"public\\".\\"basic_topk_view1\\" IN CLUSTER \\"{self._default_cluster()}\\" AS SELECT \\"f1\\", \\"pg_catalog\\".\\"count\\"(\\"f1\\") FROM \\"materialize\\".\\"public\\".\\"basic_topk_table\\" GROUP BY \\"f1\\" ORDER BY \\"f1\\" DESC NULLS LAST LIMIT 2"

> SELECT * FROM basic_topk_view1;
2 32
3 48

> SHOW CREATE MATERIALIZED VIEW basic_topk_view2;
- materialize.public.basic_topk_view2 "CREATE MATERIALIZED VIEW \\"materialize\\".\\"public\\".\\"basic_topk_view2\\" IN CLUSTER \\"default\\" AS SELECT \\"f1\\", \\"pg_catalog\\".\\"count\\"(\\"f1\\") FROM \\"materialize\\".\\"public\\".\\"basic_topk_table\\" GROUP BY \\"f1\\" ORDER BY \\"f1\\" ASC NULLS FIRST LIMIT 2"
+ materialize.public.basic_topk_view2 "CREATE MATERIALIZED VIEW \\"materialize\\".\\"public\\".\\"basic_topk_view2\\" IN CLUSTER \\"{self._default_cluster()}\\" AS SELECT \\"f1\\", \\"pg_catalog\\".\\"count\\"(\\"f1\\") FROM \\"materialize\\".\\"public\\".\\"basic_topk_table\\" GROUP BY \\"f1\\" ORDER BY \\"f1\\" ASC NULLS FIRST LIMIT 2"

> SELECT * FROM basic_topk_view2;
1 16
@@ -121,16 +121,16 @@ def manipulate(self) -> list[Testdrive]:
def validate(self) -> Testdrive:
return Testdrive(
dedent(
"""
f"""
> SHOW CREATE MATERIALIZED VIEW monotonic_topk_view1;
- materialize.public.monotonic_topk_view1 "CREATE MATERIALIZED VIEW \\"materialize\\".\\"public\\".\\"monotonic_topk_view1\\" IN CLUSTER \\"default\\" AS SELECT \\"f1\\", \\"pg_catalog\\".\\"count\\"(\\"f1\\") FROM \\"materialize\\".\\"public\\".\\"monotonic_topk_source\\" GROUP BY \\"f1\\" ORDER BY \\"f1\\" DESC NULLS LAST LIMIT 2"
+ materialize.public.monotonic_topk_view1 "CREATE MATERIALIZED VIEW \\"materialize\\".\\"public\\".\\"monotonic_topk_view1\\" IN CLUSTER \\"{self._default_cluster()}\\" AS SELECT \\"f1\\", \\"pg_catalog\\".\\"count\\"(\\"f1\\") FROM \\"materialize\\".\\"public\\".\\"monotonic_topk_source\\" GROUP BY \\"f1\\" ORDER BY \\"f1\\" DESC NULLS LAST LIMIT 2"

> SELECT * FROM monotonic_topk_view1;
E 5
D 4

> SHOW CREATE MATERIALIZED VIEW monotonic_topk_view2;
- materialize.public.monotonic_topk_view2 "CREATE MATERIALIZED VIEW \\"materialize\\".\\"public\\".\\"monotonic_topk_view2\\" IN CLUSTER \\"default\\" AS SELECT \\"f1\\", \\"pg_catalog\\".\\"count\\"(\\"f1\\") FROM \\"materialize\\".\\"public\\".\\"monotonic_topk_source\\" GROUP BY \\"f1\\" ORDER BY \\"f1\\" ASC NULLS FIRST LIMIT 2"
+ materialize.public.monotonic_topk_view2 "CREATE MATERIALIZED VIEW \\"materialize\\".\\"public\\".\\"monotonic_topk_view2\\" IN CLUSTER \\"{self._default_cluster()}\\" AS SELECT \\"f1\\", \\"pg_catalog\\".\\"count\\"(\\"f1\\") FROM \\"materialize\\".\\"public\\".\\"monotonic_topk_source\\" GROUP BY \\"f1\\" ORDER BY \\"f1\\" ASC NULLS FIRST LIMIT 2"

> SELECT * FROM monotonic_topk_view2;
A 1
@@ -184,15 +184,15 @@ def manipulate(self) -> list[Testdrive]:
def validate(self) -> Testdrive:
return Testdrive(
dedent(
"""
f"""
> SHOW CREATE MATERIALIZED VIEW monotonic_top1_view1;
- materialize.public.monotonic_top1_view1 "CREATE MATERIALIZED VIEW \\"materialize\\".\\"public\\".\\"monotonic_top1_view1\\" IN CLUSTER \\"default\\" AS SELECT \\"f1\\", \\"pg_catalog\\".\\"count\\"(\\"f1\\") FROM \\"materialize\\".\\"public\\".\\"monotonic_top1_source\\" GROUP BY \\"f1\\" ORDER BY \\"f1\\" DESC NULLS LAST LIMIT 1"
+ materialize.public.monotonic_top1_view1 "CREATE MATERIALIZED VIEW \\"materialize\\".\\"public\\".\\"monotonic_top1_view1\\" IN CLUSTER \\"{self._default_cluster()}\\" AS SELECT \\"f1\\", \\"pg_catalog\\".\\"count\\"(\\"f1\\") FROM \\"materialize\\".\\"public\\".\\"monotonic_top1_source\\" GROUP BY \\"f1\\" ORDER BY \\"f1\\" DESC NULLS LAST LIMIT 1"

> SELECT * FROM monotonic_top1_view1;
D 5

> SHOW CREATE MATERIALIZED VIEW monotonic_top1_view2;
- materialize.public.monotonic_top1_view2 "CREATE MATERIALIZED VIEW \\"materialize\\".\\"public\\".\\"monotonic_top1_view2\\" IN CLUSTER \\"default\\" AS SELECT \\"f1\\", \\"pg_catalog\\".\\"count\\"(\\"f1\\") FROM \\"materialize\\".\\"public\\".\\"monotonic_top1_source\\" GROUP BY \\"f1\\" ORDER BY \\"f1\\" ASC NULLS FIRST LIMIT 1"
+ materialize.public.monotonic_top1_view2 "CREATE MATERIALIZED VIEW \\"materialize\\".\\"public\\".\\"monotonic_top1_view2\\" IN CLUSTER \\"{self._default_cluster()}\\" AS SELECT \\"f1\\", \\"pg_catalog\\".\\"count\\"(\\"f1\\") FROM \\"materialize\\".\\"public\\".\\"monotonic_top1_source\\" GROUP BY \\"f1\\" ORDER BY \\"f1\\" ASC NULLS FIRST LIMIT 1"

> SELECT * FROM monotonic_top1_view2;
A 1
9 changes: 9 additions & 0 deletions misc/python/materialize/checks/checks.py
@@ -48,6 +48,15 @@ def _unsafe_schema(self) -> str:
else:
return "mz_internal"

+ def _default_cluster(self) -> str:
+ """
+ :return: name of the cluster created in all environments.
+ """
+ if self.base_version >= MzVersion.parse_mz("v0.82.0-dev"):
+ return "quickstart"
+ else:
+ return "default"
+
def initialize(self) -> Testdrive:
return Testdrive(TESTDRIVE_NOP)

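This helper is what the `f"""` conversions in identifiers.py, null_value.py, rename_index.py, and top_k.py above rely on. A minimal usage sketch follows; `ExampleQuickstartCheck` is hypothetical, the real checks use much longer Testdrive scripts, and the import paths are assumptions inferred from the misc/python/materialize/checks/ layout in this diff:

```python
from textwrap import dedent

from materialize.checks.actions import Testdrive  # assumed import path
from materialize.checks.checks import Check  # assumed import path


class ExampleQuickstartCheck(Check):
    """Hypothetical check that targets the built-in cluster on any version."""

    def validate(self) -> Testdrive:
        # Interpolate the version-appropriate cluster name instead of
        # hard-coding "default" or "quickstart".
        return Testdrive(
            dedent(
                f"""
                > CREATE MATERIALIZED VIEW example_mv
                  IN CLUSTER {self._default_cluster()} AS SELECT 1;
                > SELECT * FROM example_mv;
                1
                """
            )
        )
```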
35 changes: 27 additions & 8 deletions misc/python/materialize/checks/mzcompose_actions.py
@@ -110,6 +110,7 @@ class ConfigureMz(MzcomposeAction):
def __init__(self, scenario: "Scenario", mz_service: str | None = None) -> None:
self.handle: Any | None = None
self.mz_service = mz_service
+ self.scenario = scenario

def execute(self, e: Executor) -> None:
input = dedent(
@@ -159,7 +160,13 @@ def execute(self, e: Executor) -> None:
system_settings.add(
"GRANT CREATE ON SCHEMA materialize.public TO materialize;"
)
system_settings.add("GRANT CREATE ON CLUSTER default TO materialize;")
if self.scenario.base_version() >= MzVersion.parse_mz("v0.82.0-dev"):
cluster_name = "quickstart"
else:
cluster_name = "default"
system_settings.add(
f"GRANT CREATE ON CLUSTER {cluster_name} TO materialize;"
)

if (
MzVersion.parse_mz("v0.58.0-dev")
@@ -240,12 +247,16 @@ def execute(self, e: Executor) -> None:
port=6877,
user="mz_system",
)
+ if self.base_version >= MzVersion.parse_mz("v0.82.0-dev"):
+ cluster_name = "quickstart"
+ else:
+ cluster_name = "default"

c.sql(
f"""
- ALTER CLUSTER default SET (MANAGED = false);
- DROP CLUSTER REPLICA default.r1;
- CREATE CLUSTER REPLICA default.r1
+ ALTER CLUSTER {cluster_name} SET (MANAGED = false);
+ DROP CLUSTER REPLICA {cluster_name}.r1;
+ CREATE CLUSTER REPLICA {cluster_name}.r1
{storage_addresses},
COMPUTECTL ADDRESSES ['clusterd_compute_1:2101'],
COMPUTE ADDRESSES ['clusterd_compute_1:2102'],
@@ -318,14 +329,22 @@ def execute(self, e: Executor) -> None:


class DropCreateDefaultReplica(MzcomposeAction):
+ def __init__(self, scenario: "Scenario") -> None:
+ self.base_version = scenario.base_version()
+
def execute(self, e: Executor) -> None:
c = e.mzcompose_composition()

+ if self.base_version >= MzVersion.parse_mz("v0.82.0-dev"):
+ cluster_name = "quickstart"
+ else:
+ cluster_name = "default"
+
c.sql(
"""
ALTER CLUSTER default SET (MANAGED = false);
DROP CLUSTER REPLICA default.r1;
CREATE CLUSTER REPLICA default.r1 SIZE '1';
f"""
ALTER CLUSTER {cluster_name} SET (MANAGED = false);
DROP CLUSTER REPLICA {cluster_name}.r1;
CREATE CLUSTER REPLICA {cluster_name}.r1 SIZE '1';
""",
port=6877,
user="mz_system",
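The same version gate now appears three times in this file (ConfigureMz, the clusterd replica swap, and DropCreateDefaultReplica) and once more as Check._default_cluster in checks.py. Purely as an illustration of the pattern, and not something this PR does, the gate could live in one module-level helper; MzVersion.parse_mz and the `>=` comparison are used throughout the diff, while the import path is an assumption:

```python
from materialize.mz_version import MzVersion  # assumed import path

# The built-in cluster was renamed from "default" to "quickstart" in v0.82.0.
_CLUSTER_RENAME_VERSION = MzVersion.parse_mz("v0.82.0-dev")


def builtin_cluster_name(base_version: MzVersion) -> str:
    """Name of the pre-installed cluster for the given Materialize version."""
    return "quickstart" if base_version >= _CLUSTER_RENAME_VERSION else "default"
```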
2 changes: 1 addition & 1 deletion misc/python/materialize/checks/scenarios.py
@@ -135,7 +135,7 @@ def actions(self) -> list[Action]:
StartMz(self),
Initialize(self),
Manipulate(self, phase=1),
- DropCreateDefaultReplicaAction(),
+ DropCreateDefaultReplicaAction(self),
Manipulate(self, phase=2),
Validate(self),
]
2 changes: 1 addition & 1 deletion misc/python/materialize/zippy/mz_actions.py
@@ -93,7 +93,7 @@ def run(self, c: Composition) -> None:

c.sql(
"""
- ALTER CLUSTER default SET (MANAGED = false);
+ ALTER CLUSTER quickstart SET (MANAGED = false);
""",
user="mz_system",
port=6877,