diff --git a/docs/v0.15.0/antora.yml b/docs/v0.15.0/antora.yml
new file mode 100644
index 00000000..0eaf9cb1
--- /dev/null
+++ b/docs/v0.15.0/antora.yml
@@ -0,0 +1,10 @@
+name: turtles
+title: Rancher Turtles
+version: next
+display_version: 'Next'
+start_page: en:index.adoc
+asciidoc:
+ attributes:
+ product_name: Rancher Turtles
+nav:
+ - modules/en/nav.adoc
diff --git a/docs/v0.15.0/modules/en/images/30000ft_view.png b/docs/v0.15.0/modules/en/images/30000ft_view.png
new file mode 100644
index 00000000..7441fa1b
Binary files /dev/null and b/docs/v0.15.0/modules/en/images/30000ft_view.png differ
diff --git a/docs/v0.15.0/modules/en/images/capi_logo.svg b/docs/v0.15.0/modules/en/images/capi_logo.svg
new file mode 100644
index 00000000..2133449f
--- /dev/null
+++ b/docs/v0.15.0/modules/en/images/capi_logo.svg
@@ -0,0 +1 @@
+kubernetes-cluster-logos_final
\ No newline at end of file
diff --git a/docs/v0.15.0/modules/en/images/deployments-turtles.png b/docs/v0.15.0/modules/en/images/deployments-turtles.png
new file mode 100644
index 00000000..c165360a
Binary files /dev/null and b/docs/v0.15.0/modules/en/images/deployments-turtles.png differ
diff --git a/docs/v0.15.0/modules/en/images/favicon.ico b/docs/v0.15.0/modules/en/images/favicon.ico
new file mode 100644
index 00000000..630449c0
Binary files /dev/null and b/docs/v0.15.0/modules/en/images/favicon.ico differ
diff --git a/docs/v0.15.0/modules/en/images/gh_clone.png b/docs/v0.15.0/modules/en/images/gh_clone.png
new file mode 100644
index 00000000..1909eba2
Binary files /dev/null and b/docs/v0.15.0/modules/en/images/gh_clone.png differ
diff --git a/docs/v0.15.0/modules/en/images/image.png b/docs/v0.15.0/modules/en/images/image.png
new file mode 100644
index 00000000..2f4da268
Binary files /dev/null and b/docs/v0.15.0/modules/en/images/image.png differ
diff --git a/docs/v0.15.0/modules/en/images/image1554.png b/docs/v0.15.0/modules/en/images/image1554.png
new file mode 100644
index 00000000..0a074a55
Binary files /dev/null and b/docs/v0.15.0/modules/en/images/image1554.png differ
diff --git a/docs/v0.15.0/modules/en/images/in_cluster_topology.png b/docs/v0.15.0/modules/en/images/in_cluster_topology.png
new file mode 100644
index 00000000..7be5f6c9
Binary files /dev/null and b/docs/v0.15.0/modules/en/images/in_cluster_topology.png differ
diff --git a/docs/v0.15.0/modules/en/images/install-turtles-from-ui.gif b/docs/v0.15.0/modules/en/images/install-turtles-from-ui.gif
new file mode 100644
index 00000000..80f6880e
Binary files /dev/null and b/docs/v0.15.0/modules/en/images/install-turtles-from-ui.gif differ
diff --git a/docs/v0.15.0/modules/en/images/intro.png b/docs/v0.15.0/modules/en/images/intro.png
new file mode 100644
index 00000000..49d88593
Binary files /dev/null and b/docs/v0.15.0/modules/en/images/intro.png differ
diff --git a/docs/v0.15.0/modules/en/images/logo.svg b/docs/v0.15.0/modules/en/images/logo.svg
new file mode 100644
index 00000000..e65404a5
--- /dev/null
+++ b/docs/v0.15.0/modules/en/images/logo.svg
@@ -0,0 +1,106 @@
diff --git a/docs/v0.15.0/modules/en/images/ns.png b/docs/v0.15.0/modules/en/images/ns.png
new file mode 100644
index 00000000..b9b7ff41
Binary files /dev/null and b/docs/v0.15.0/modules/en/images/ns.png differ
diff --git a/docs/v0.15.0/modules/en/images/rancher-logo-cow-blue.svg b/docs/v0.15.0/modules/en/images/rancher-logo-cow-blue.svg
new file mode 100644
index 00000000..8353dec6
--- /dev/null
+++ b/docs/v0.15.0/modules/en/images/rancher-logo-cow-blue.svg
@@ -0,0 +1,26 @@
\ No newline at end of file
diff --git a/docs/v0.15.0/modules/en/images/sidebar.png b/docs/v0.15.0/modules/en/images/sidebar.png
new file mode 100644
index 00000000..a7371d20
Binary files /dev/null and b/docs/v0.15.0/modules/en/images/sidebar.png differ
diff --git a/docs/v0.15.0/modules/en/nav.adoc b/docs/v0.15.0/modules/en/nav.adoc
new file mode 100644
index 00000000..8ec1c3fc
--- /dev/null
+++ b/docs/v0.15.0/modules/en/nav.adoc
@@ -0,0 +1,53 @@
+* Getting Started
+** xref:index.adoc[Introduction]
+** xref:getting-started/rancher.adoc[Rancher Setup]
+** Install {product_name}
+*** xref:getting-started/install-rancher-turtles/using_rancher_dashboard.adoc[Via Rancher Dashboard]
+*** xref:getting-started/install-rancher-turtles/using_helm.adoc[Via Helm Install]
+** Your first cluster
+*** xref:getting-started/create-first-cluster/intro.adoc[Introduction]
+*** xref:getting-started/create-first-cluster/using_fleet.adoc[Create & import your first cluster using Fleet]
+*** xref:getting-started/create-first-cluster/using_kubectl.adoc[Create & Import Your First Cluster Using kubectl]
+** Using ClusterClass
+*** xref:getting-started/cluster-class/intro.adoc[Introduction]
+*** xref:getting-started/cluster-class/create_cluster.adoc[Create a cluster using Fleet]
+** xref:getting-started/air-gapped-environment.adoc[Air-gapped environment]
+** xref:getting-started/uninstall_turtles.adoc[Uninstall {product_name}]
+* Reference Guides
+** Architecture
+*** xref:reference-guides/architecture/intro.adoc[Introduction]
+*** xref:reference-guides/architecture/components.adoc[Components]
+*** xref:reference-guides/architecture/deployment.adoc[Deployment Scenarios]
+** xref:reference-guides/rancher-turtles-chart/values.adoc[Chart configuration]
+** CAPI Providers
+*** xref:reference-guides/providers/certified.adoc[Certified CAPI Providers]
+*** xref:reference-guides/providers/howto.adoc[Create & import a cluster using CAPI providers]
+*** xref:reference-guides/providers/addon-provider-fleet.adoc[Cluster API Addon Provider Fleet]
+** Test suite
+*** xref:reference-guides/test-suite/intro.adoc[Introduction]
+*** xref:reference-guides/test-suite/usage.adoc[Test suite guide]
+* Tasks
+** xref:tasks/intro.adoc[Introduction]
+** Cluster API Operator
+*** xref:tasks/capi-operator/basic_cluster_api_provider_installation.adoc[Basic Cluster API Provider Installation]
+*** xref:tasks/capi-operator/installing_core_provider.adoc[Installing the CoreProvider using CAPIProvider resource]
+*** xref:tasks/capi-operator/capiprovider_resource.adoc[CAPIProvider Resource]
+*** xref:tasks/capi-operator/clusterctlconfig_resource.adoc[ClusterctlConfig Resource]
+*** xref:tasks/capi-operator/add_infrastructure_provider.adoc[Installing AWS Infrastructure Provider using CAPIProvider resource]
+** Maintenance
+*** xref:tasks/maintenance/early_adopter_upgrade.adoc[Upgrade Instructions for Early Adopters]
+*** xref:tasks/maintenance/import_controller_upgrade.adoc[Upgrade Turtles import controller]
+*** xref:tasks/maintenance/automigrate_to_v3_import.adoc[Auto-migration to v3 cluster import]
+** Provider Certification
+*** xref:tasks/provider-certification/intro.adoc[What is a Certified Provider?]
+*** xref:tasks/provider-certification/process.adoc[Provider Certification Guide]
+* Developer Guide
+** xref:developer-guide/intro.adoc[Introduction]
+** xref:developer-guide/install_capi_operator.adoc[Installing Cluster API Operator]
+** xref:developer-guide/development.adoc[Development setup]
+** xref:developer-guide/contributing_guidelines.adoc[Guidelines]
+* Reference
+** xref:reference/intro.adoc[Introduction]
+** xref:reference/glossary.adoc[Glossary]
+* Security
+** xref:security/slsa.adoc[SLSA]
\ No newline at end of file
diff --git a/docs/v0.15.0/modules/en/pages/developer-guide/contributing_guidelines.adoc b/docs/v0.15.0/modules/en/pages/developer-guide/contributing_guidelines.adoc
new file mode 100644
index 00000000..7c432aa8
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/developer-guide/contributing_guidelines.adoc
@@ -0,0 +1,279 @@
+= Guidelines
+
+// START doctoc generated TOC please keep comment here to allow auto update
+
+// DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE
+
+* <<_how_to_get_involved,How to get involved?>>
+* <<_submitting_prs,Submitting PRs>>
+ ** <<_choosing_something_to_work_on,Choosing something to work on>>
+ ** <<_developing_rancher_turtles,Developing rancher-turtles>>
+ ** <<_asking_for_help,Asking for help>>
+ ** <<_pr_submission_guidelines,PR submission guidelines>>
+ *** <<_commit_message_formatting,Commit message formatting>>
+* <<_opening_issues,Opening Issues>>
+* <<_how_the_maintainers_process_contributions,How the Maintainers process contributions>>
+ ** <<_prioritizing_issues,Prioritizing issues>>
+ ** <<_reviewing_prs,Reviewing PRs>>
+* <<_adrs_architectural_decision_records,ADRs (Architectural Decision Records)>>
+ ** <<_process,Process>>
+
+// END doctoc generated TOC please keep comment here to allow auto update
+
+Thank you for taking the time to contribute to Rancher CAPI extension https://github.com/rancher?q=turtles&type=all&language=&sort=[projects].
+
+Improvements to all areas of the project, from code to documentation and
+from bug reports to feature design and UI enhancements, are gratefully welcome.
+This guide should cover all aspects of how to interact with the project
+and how to get involved in development as smoothly as possible.
+
+Reading docs is often tedious, so let's put the important contributing rule
+right at the top: *Always be kind!*
+
+Looking forward to seeing your contributions in the repo!
+
+== How to get involved?
+
+We'd love to accept your patches in pretty much all areas of the project's development!
+
+If you're new to the project and want to help, but don't know where to start, here is a non-exhaustive list of ways you can help out:
+
+. Submit a <<_submitting_prs,Pull Request>>
++
+Beyond fixing bugs and submitting new features, there are other things you can submit
+ which, while less flashy, will be deeply appreciated by all who interact with the codebase:
+
+ ** Extending test coverage!
+ ** Refactoring!
+ ** Reviewing and updating https://rancher.github.io/turtles-docs/[documentation]!
+ ** Adding a new UI functionality!
+
++
+(See also <<_choosing_something_to_work_on,Choosing something to work on>> below.)
+
+. Open an <<_opening_issues,issue>>
++
+We have 2 forms of issues: bug reports and feature requests. If you are not sure which category you need, just make the best guess and provide as much information as possible.
+
+. Interested in helping to improve:
+
+* Rancher CAPI extension backend? Chime in on https://github.com/rancher/turtles/issues?q=is%3Aopen+is%3Aissue+label%3Akind%2Fbug+[`bugs`] or
+ https://github.com/rancher/turtles/labels/help-wanted[`help wanted` issues].
+ If you are seeking to take on a bigger challenge, or are a more experienced contributor, check out https://github.com/rancher/turtles/issues?q=is%3Aopen+is%3Aissue+label%3Akind%2Ffeature[`feature requests`].
+* extension UI? Take a look at https://github.com/rancher/capi-ui-extension[`open`] or
+ https://github.com/rancher/capi-ui-extension/labels/help-wanted[`help wanted` issues].
+* maybe, user-docs? Then, jump straight into https://github.com/rancher/turtles-docs/issues[`open` issues] in the docs repository.
+
+== Opening Issues
+
+These guides aim to help you write issues in a way which will ensure that they are processed
+as quickly as possible.
+
+_See below for <<_prioritizing_issues,how issues are prioritized>>_.
+
+*General rules*:
+
+. Before opening anything, take a good look through existing issues.
+. More is more: give as much information as it is humanly possible to give.
+ Highly detailed issues are more likely to be picked up because they can be prioritized and
+ scheduled for work faster. They are also more accessible
+ to the community, meaning that you may not have to wait for the core team to get to it.
+. Please do not open an issue with a description that is *just* a link to another issue,
+ a link to a slack conversation, a quote from either one of those, or anything else
+ equally opaque. This raises the bar for entry and makes it hard for the community
+ to get involved. Take the time to write a proper description and summarise key points.
+. Take care with formatting. Ensure the https://docs.github.com/en/free-pro-team@latest/github/writing-on-github/getting-started-with-writing-and-formatting-on-github[markdown is tidy],
+ use https://docs.github.com/en/free-pro-team@latest/github/writing-on-github/creating-and-highlighting-code-blocks[code blocks] etc etc.
+ The faster something can be read, the faster it can be dealt with.
+. Keep it civil. Yes, it is annoying when things don't work, but it is way more fun helping out
+ someone who is not... the worst. Remember that conversing via text exacerbates
+ everyone's negativity bias, so throw in some emoji when in doubt.
+
+== Submitting PRs
+
+=== Choosing something to work on
+
+If you are here to ask for help or request some new behaviour, this
+is the section for you. We have curated a set of issues for anyone who simply
+wants to build up their open-source cred.
+
+* Issues labelled https://github.com/search?q=org%3Agithub%2Francher+repo%3Arancher%2Fturtles+repo%3Arancher%2Fcapi-ui-extension+repo%3Arancher%2Fturtles-docs+is%3Aopen+label%3A%22good+first+issue%22+&type=issues&ref=advsearch[`good first issues`]
+should be accessible to folks new to the repos, as well as to open source in general.
++
+These issues should present a low/non-existent barrier to entry with a thorough description,
+easy-to-follow reproduction (if relevant) and enough context for anyone to pick up.
+The objective should be clear, possibly with a suggested solution or some pseudocode.
+If anything similar has been done, that work should be linked.
++
+If you have come across an issue tagged with `good first issue` which you think you would
+like to claim but isn't 100% clear, please ask for more info! When people write issues
+there is a _lot_ of assumed knowledge which is very often taken for granted. This is
+something we could all get better at, so don't be shy in asking for what you need
+to do great work.
++
+See more on <<_asking_for_help,asking for help>> below!
+
+* https://github.com/search?q=org%3Agithub%2Francher+repo%3Arancher%2Fturtles+repo%3Arancher%2Fcapi-ui-extension+repo%3Arancher%2Fturtles-docs+is%3Aopen+label%3A%22help+wanted%22+&type=issues&ref=advsearch[`help wanted` issues]
+are for those a little more familiar with the code base, but should still be accessible enough
+to newcomers.
+* All other issues labelled `kind/` or `area/` are also up for grabs, but
+are likely to require a fair amount of context.
+
+=== Developing rancher-turtles
+
+Check out the dedicated xref:../developer-guide/development.adoc[notes] on getting started with development.
+
+=== Asking for help
+
+If you need help at any stage of your work, please don't hesitate to ask!
+
+* To get more detail on the issue you have chosen, it is a good idea to start by asking
+whoever created it to provide more information.
+* If you are struggling with something while working on your PR, or aren't quite
+sure of your approach, you can open a https://github.blog/2019-02-14-introducing-draft-pull-requests/[draft]
+(prefix the title with `WIP:`) and explain what you are thinking.
+
+=== PR submission guidelines
+
+. Fork the desired repo, develop and test your code changes.
+. Push your changes to the branch on your fork and submit a pull request to the original repository
+against the `main` branch.
++
+[source,bash]
+----
+git push
+----
+
+. Submit a pull request.
+ .. All code PRs must be labeled with one of the following:
+ *** ⚠️ (`:warning:`, major or breaking changes)
+ *** ✨ (`:sparkles:`, feature additions)
+ *** 🐛 (`:bug:`, patch and bugfixes)
+ *** 📖 (`:book:`, documentation or proposals)
+ *** 🌱 (`:seedling:`, minor or other)
+
+Where possible, please squash your commits to ensure a tidy and descriptive history.
+
+If your PR is still a work in progress, please open a https://github.blog/2019-02-14-introducing-draft-pull-requests/[Draft PR]
+and prefix your title with the word `WIP`. When your PR is ready for review, you
+can change the title and remove the Draft setting.
+
+We recommend that you regularly rebase from `main` of the original repo to keep your
+branch up to date.
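+
+For example, keeping your branch rebased on the original repository's `main` and squashing before pushing could look roughly like the sketch below (the `upstream` remote name and the rancher/turtles URL are assumptions; adjust them for the repo you are working on):
+
+[source,bash]
+----
+# One-time setup: add the original repository as a remote named "upstream"
+git remote add upstream https://github.com/rancher/turtles.git
+
+# Fetch the latest changes and rebase your branch onto upstream/main
+git fetch upstream
+git rebase upstream/main
+
+# Optionally squash commits interactively, then push the rewritten branch to your fork
+git rebase -i upstream/main
+git push --force-with-lease origin <your-branch>
+----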
+
+In general, we will merge a PR once a maintainer has reviewed and approved it.
+Trivial changes (e.g., corrections to spelling) may get waved through.
+For substantial changes, more people may become involved, and you might get asked to resubmit the PR or divide the changes into more than one PR.
+
+==== Commit message formatting
+
+_For more on how to write great commit messages, and why you should, check out
+https://chris.beams.io/posts/git-commit/[this excellent blog post]._
+
+We follow a rough convention for commit messages that is designed to answer three
+questions: what changed, why was the change made, and how did you make it.
+
+The subject line should feature the _what_ and
+the body of the commit should describe the _why_ and _how_.
+If you encountered any weirdness along the way, this is a good place
+to note what you discovered and how you solved it.
+
+The format can be described more formally as follows:
+
+[source,text]
+----
+<what changed (subject line)>
+
+<why the change was made and how (body)>
+----
+
+The first line is the subject and should be no longer than 70 characters, the
+second line is always blank, and other lines should be wrapped at a max of 80 characters.
+This allows the message to be easier to read on GitHub as well as in various git tools.
+
+There is a template recommended for use https://gist.github.com/yitsushi/656e68c7db141743e81b7dcd23362f1a[here].
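+
+For instance, a commit message following this convention might look like the example below (purely illustrative, not taken from the repository):
+
+[source,text]
+----
+Document air-gapped provider installation
+
+The docs did not explain how to install CAPI providers without internet
+access. Add a ConfigMap-based fetchConfig walkthrough and describe how to
+compress manifests that exceed the 1MiB ConfigMap limit.
+----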
+
+== How the Maintainers process contributions
+
+=== Prioritizing issues
+
+The core team regularly processes incoming issues. There may be some delay over holiday periods.
+
+Every issue will be assigned a `priority/` label. The levels of priorities are:
+
+* https://github.com/rancher/turtles/labels/priority%2Fcritical-urgent[`critical-urgent`]: These are time sensitive issues which should be picked up immediately.
+If an issue labelled `critical` is not assigned or being actively worked on,
+someone is expected to drop what they're doing immediately to work on it.
+This usually means the core team, but community members are welcome to claim
+issues at any priority level if they get there first. _However, given the pressing
+timeframe, should a non-core contributor request to be assigned to a `critical` issue,
+they will be paired with a core team-member to manage the tracking, communication and release of any fix
+as well as to assume responsibility for all progress._
+* https://github.com/rancher/turtles/labels/priority%2Fimportant-soon[`important-soon`]: Must be assigned as soon as capacity becomes available.
+Ideally something should be delivered in time for the next release.
+* https://github.com/rancher/turtles/labels/priority%2Fimportant-longterm[`important-longterm`]: Important over the long term, but may not be currently
+staffed and/or may require multiple releases to complete.
+* https://github.com/rancher/turtles/labels/priority%2Fbacklog[`backlog`]: There appears to be general agreement that this would be good to have,
+but we may not have anyone available to work on it right now or in the immediate future.
+PRs are still very welcome, although it might take a while to get them reviewed if
+reviewers are fully occupied with higher priority issues, for example immediately before a release.
+
+These priority categories have been inspired by https://github.com/kubernetes/community/blob/master/contributors/guide/issue-triage.md[the Kubernetes contributing guide].
+
+Other labels include:
+
+* https://github.com/rancher/turtles/labels/adr-required[`adr-required`]:
+Indicates that the issue or PR contains a decision that needs to be documented in an <<_adrs_architectural_decision_records,ADR>> _before_
+it can be worked on.
+* https://github.com/rancher/turtles/labels/needs-investigation[`needs-investigation`]: There is currently insufficient information to either categorize properly,
+or to understand and implement a solution. This could be because the issue opener did
+not provide enough relevant information, or because more in-depth research is required
+before work can begin.
+
+=== Reviewing PRs
+
+The core team aims to clear the PR queue as quickly as possible. Community members
+should also feel free to keep an eye on things and provide their own thoughts and expertise.
+
+High-value and/or high priority contributions will be processed as quickly as possible,
+while lower priority or nice-to-have things may take a little longer to get approved.
+
+To help facilitate a smoother and faster review, follow the guidelines <<_pr_submission_guidelines,above>>.
+Submissions which do not meet standards will be de-prioritised for review.
+
+== ADRs (Architectural Decision Records)
+
+[NOTE]
+====
+Please feel free to skip <<_adrs_architectural_decision_records,this section>> and the <<_process,sub-section>> below, since they are only relevant to the https://github.com/rancher/turtles[rancher-turtles] repository.
+====
+
+
+Any decisions impacting the architecture, design, development and behaviour
+of rancher-turtles must be recorded in the form of an https://engineering.atspotify.com/2020/04/14/when-should-i-write-an-architecture-decision-record/[ADR].
+
+A template can be found at https://github.com/rancher/turtles/blob/main/docs/adr/0000-template.md[`docs/adr/0000-template.md`] of the repo,
+with numerous examples of completed records in the same directory.
+
+Contributors are also welcome to backfill ADRs if they are found to be missing.
+
+=== Process
+
+. Start a new https://github.com/rancher/turtles/discussions/new?category=adr[discussion] using the `ADR` category.
+. Choose a clear and concise title (e.g. `ADR: Implement X in Go`).
+. Provide the context of the decision to be made. Describe
+ the various options, if there is more than one, and explain the pros and cons. Highlight
+ any areas which you would like the reviewers to pay attention to, or those on which
+ you would specifically like an opinion.
+. Tag in the https://github.com/rancher/turtles/blob/main/CODEOWNERS[maintainers] as the "Deciders", and invite them to
+ participate and weigh in on the decision and its consequences.
+. Once a decision has been made, open a PR adding a new ADR to the https://github.com/rancher/turtles/blob/main/docs/adr[directory].
+ Copy and complete the https://github.com/rancher/turtles/blob/main/docs/adr/0000-template.md[template];
+ ** Increment the file number by one
+ ** Set the status as "Accepted"
+ ** Set the deciders as those who approved the discussion outcome
+ ** Summarise the decision and consequences from the discussion thread
+ ** Link back to the discussion from the ADR doc
diff --git a/docs/v0.15.0/modules/en/pages/developer-guide/development.adoc b/docs/v0.15.0/modules/en/pages/developer-guide/development.adoc
new file mode 100644
index 00000000..6aa8e68f
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/developer-guide/development.adoc
@@ -0,0 +1,57 @@
+= Development setup
+:sidebar_position: 3
+
+== Prerequisites
+
+* https://kind.sigs.k8s.io/[kind]
+* https://helm.sh/[helm]
+* https://tilt.dev/[tilt]
+
+== Create a local development environment
+
+. Clone the https://github.com/rancher/turtles[{product_name}] repository locally
+. Create *tilt-settings.yaml*:
++
+[source,yaml]
+----
+{
+ "k8s_context": "k3d-rancher-test",
+ "default_registry": "ghcr.io/turtles-dev",
+ "debug": {
+ "turtles": {
+ "continue": true,
+ "port": 40000
+ }
+ }
+}
+----
++
+. Open a terminal in the root of the {product_name} repository
+. Run the following:
++
+[source,bash]
+----
+make dev-env
+
+# Or if you want to use a custom hostname for Rancher
+RANCHER_HOSTNAME=my.customhost.dev make dev-env
+----
++
+. When Tilt has started, open a new terminal and start `ngrok` or `inlets`:
++
+[source,bash]
+----
+kubectl port-forward --namespace cattle-system svc/rancher 10000:443
+ngrok http https://localhost:10000
+----
+
+== What happens when you run `make dev-env`?
+
+. A https://kind.sigs.k8s.io/[kind] cluster is created with the following https://github.com/rancher/turtles/blob/main/scripts/kind-cluster-with-extramounts.yaml[configuration].
+. xref:../developer-guide/install_capi_operator.adoc[Cluster API Operator] is installed using helm, which includes:
+ ** Core Cluster API controller
+ ** Kubeadm Bootstrap and Control Plane Providers
+ ** Docker Infrastructure Provider
+ ** Cert manager
+. `Rancher manager` is installed using helm.
+. `tilt up` is run to start the development environment.
diff --git a/docs/v0.15.0/modules/en/pages/developer-guide/install_capi_operator.adoc b/docs/v0.15.0/modules/en/pages/developer-guide/install_capi_operator.adoc
new file mode 100644
index 00000000..fc959818
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/developer-guide/install_capi_operator.adoc
@@ -0,0 +1,131 @@
+= Installing Cluster API Operator
+:sidebar_position: 2
+
+[CAUTION]
+====
+Installing Cluster API Operator by following this page (without it being a Helm dependency to {product_name}) is not a recommended installation method and intended only for local development purposes.
+====
+
+
+This section describes how to install `Cluster API Operator` in the Kubernetes cluster.
+
+== Installing Cluster API (CAPI) and providers
+
+`CAPI` and the desired `CAPI` providers can be installed using the helm-based installation of the https://github.com/kubernetes-sigs/cluster-api-operator[`Cluster API Operator`] or as a helm dependency of `{product_name}`.
+
+=== Install manually with Helm (alternative)
+
+To install `Cluster API Operator` with version `v1.7.7` of the `CAPI` core and `Docker` providers using helm, follow these steps:
+
+. Add the Helm repository for the `Cluster API Operator`:
++
+[source,bash]
+----
+helm repo add capi-operator https://kubernetes-sigs.github.io/cluster-api-operator
+helm repo add jetstack https://charts.jetstack.io
+----
++
+. Update the Helm repository:
++
+[source,bash]
+----
+helm repo update
+----
++
+. Install the Cert-Manager:
++
+[source,bash]
+----
+helm install cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --set installCRDs=true
+----
++
+. Install the `Cluster API Operator`:
++
+[source,bash]
+----
+helm install capi-operator capi-operator/cluster-api-operator \
+ --create-namespace -n capi-operator-system \
+ --set infrastructure=docker:v1.7.7 \
+ --set core=cluster-api:v1.7.7 \
+ --timeout 90s --wait # Core Cluster API with kubeadm bootstrap and control plane providers will also be installed
+----
+
+[NOTE]
+====
+`cert-manager` is a hard requirement for `CAPI` and `Cluster API Operator`.
+====
+
+
+To provide additional environment variables, enable feature gates, or supply cloud credentials (similar to the `clusterctl` https://cluster-api.sigs.k8s.io/user/quick-start#initialization-for-common-providers[common providers] flow), a variables secret can be referenced by its `name` and `namespace` when installing the `Cluster API Operator`, as shown below.
+
+[source,bash]
+----
+helm install capi-operator capi-operator/cluster-api-operator \
+ --create-namespace -n capi-operator-system \
+ --set infrastructure=docker:v1.7.7 \
+ --set core=cluster-api:v1.7.7 \
+ --timeout 90s \
+ --secret-name <secret_name> \
+ --wait
+----
+
+Example secret data:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+ name: variables
+ namespace: default
+type: Opaque
+stringData:
+ CLUSTER_TOPOLOGY: "true"
+ EXP_CLUSTER_RESOURCE_SET: "true"
+----
+
+To select more than one desired provider to be installed together with the `Cluster API Operator`, the `infrastructure` flag can be specified with multiple provider names separated by a semicolon. For example:
+
+[source,bash]
+----
+helm install ... --set infrastructure="docker:v1.7.7;aws:v2.6.1"
+----
+
+The `infrastructure` flag is set to `docker:v1.7.7;aws:v2.6.1`, representing the desired provider names. This means that the `Cluster API Operator` will install and manage multiple providers, `Docker` and `AWS`, with versions `v1.7.7` and `v2.6.1` respectively.
+
+The cluster is now ready to install {product_name}. The default behavior when installing the chart is to install Cluster API Operator as a Helm dependency. Since we decided to install it manually before installing {product_name}, the feature `cluster-api-operator.enabled` must be explicitly disabled as otherwise it would conflict with the existing installation. You can refer to xref:../developer-guide/install_capi_operator.adoc#_install_suse_rancher_prime_cluster_api_without_cluster_api_operator_as_a_helm_dependency[Install {product_name} without Cluster API Operator] to see next steps.
+
+[TIP]
+====
+For more fine-grained control of the providers and other components installed with CAPI, see the xref:../tasks/capi-operator/add_infrastructure_provider.adoc[Add the infrastructure provider] section.
+====
+
+=== Install {product_name} without `Cluster API Operator` as a Helm dependency
+
+[NOTE]
+====
+This option is only suitable for development purposes and not recommended for production environments.
+====
+
+
+The `rancher-turtles` chart is available at https://rancher.github.io/turtles, and this Helm repository must be added before proceeding with the installation:
+
+[source,bash]
+----
+helm repo add turtles https://rancher.github.io/turtles
+helm repo update
+----
+
+and then it can be installed into the `rancher-turtles-system` namespace with:
+
+[source,bash]
+----
+helm install rancher-turtles turtles/rancher-turtles --version v0.13.0 \
+ -n rancher-turtles-system \
+ --set cluster-api-operator.enabled=false \
+ --set cluster-api-operator.cluster-api.enabled=false \
+ --create-namespace --wait \
+ --dependency-update
+----
+
+As you can see, we are telling Helm not to install `cluster-api-operator` as a dependency.
diff --git a/docs/v0.15.0/modules/en/pages/developer-guide/intro.adoc b/docs/v0.15.0/modules/en/pages/developer-guide/intro.adoc
new file mode 100644
index 00000000..e3bd7cb4
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/developer-guide/intro.adoc
@@ -0,0 +1,4 @@
+= Introduction
+:sidebar_position: 0
+
+Everything you need to know about developing {product_name}.
diff --git a/docs/v0.15.0/modules/en/pages/getting-started/air-gapped-environment.adoc b/docs/v0.15.0/modules/en/pages/getting-started/air-gapped-environment.adoc
new file mode 100644
index 00000000..493074b5
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/getting-started/air-gapped-environment.adoc
@@ -0,0 +1,129 @@
+= Air-gapped environment
+:sidebar_position: 3
+
+{product_name} provides support for an air-gapped environment out-of-the-box by leveraging features of the Cluster API Operator, the required dependency for installing {product_name}.
+
+To provision and configure Cluster API providers, Turtles uses the *CAPIProvider* resource to allow managing Cluster API Operator manifests in a declarative way. Every field provided by the upstream CAPI Operator resource for the desired `spec.type` is also available in the `spec` of the *CAPIProvider* resource.
+
+To install Cluster API providers in an air-gapped environment the following will need to be done:
+
+. Configure the Cluster API Operator for an air-gapped environment:
+ ** The operator chart will be fetched and stored as a part of the Turtles chart.
+ ** Provide image overrides for the operator from an accessible image repository.
+. Configure Cluster API providers for an air-gapped environment:
+ ** Provide fetch configuration for each provider from an accessible location (e.g., an internal github/gitlab server) or from pre-created ConfigMaps within the cluster.
+ ** Provide image overrides for each provider to pull images from an accessible image repository.
+. Configure {product_name} for an air-gapped environment:
+ ** Collect {product_name} images and publish them to the private registry. See this https://ranchermanager.docs.rancher.com/getting-started/installation-and-upgrade/other-installation-methods/air-gapped-helm-cli-install/publish-images#2-collect-the-cert-manager-image[example of a cert-manager installation] for reference.
+ ** Provide fetch configuration and image values for `core` and `caprke2` providers in xref:../reference-guides/rancher-turtles-chart/values.adoc#cluster-api-operator-values[values.yaml].
+ ** Provide an image value for the Cluster API Operator helm chart dependency in https://github.com/kubernetes-sigs/cluster-api-operator/blob/main/hack/charts/cluster-api-operator/values.yaml#L26[values.yaml]. Image values specified with the `cluster-api-operator` key will be passed along to the Cluster API Operator.
+
+== Example Usage
+
+As an admin, I need to fetch the vSphere provider (CAPV) components from within the cluster because I am working in an air-gapped environment.
+
+In this example, there is a ConfigMap in the `capv-system` namespace that defines the components and metadata of the provider. It can be created manually or by running the following commands:
+
+[source,bash]
+----
+# Get the file contents from the GitHub release
+curl -L https://github.com/rancher-sandbox/cluster-api-provider-vsphere/releases/download/v1.11.2/infrastructure-components.yaml -o components.yaml
+curl -L https://github.com/rancher-sandbox/cluster-api-provider-vsphere/releases/download/v1.11.2/metadata.yaml -o metadata.yaml
+
+# Create the configmap from the files
+kubectl create configmap v1.11.2 --namespace=capv-system --from-file=components=components.yaml --from-file=metadata=metadata.yaml --dry-run=client -o yaml > configmap.yaml
+----
+
+This command example would need to be adapted to the provider and version you want to use. The resulting config map will look similar to the example below:
+
+[source,yaml]
+----
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ labels:
+ provider-components: vsphere
+ name: v1.11.2
+ namespace: capv-system
+data:
+ components: |
+ # Components for v1.11.2 YAML go here
+ metadata: |
+ # Metadata information goes here
+----
+
+A *CAPIProvider* resource will need to be created to represent the vSphere infrastructure provider. It will need to be configured with a `fetchConfig`. The label selector allows the operator to determine the available versions of the vSphere provider and the Kubernetes resources that need to be deployed (i.e. contained within ConfigMaps which match the label selector).
+
+Since the provider's version is marked as `v1.11.2`, the operator uses the components information from the ConfigMap with matching label to install the vSphere provider.
+
+[source,yaml]
+----
+apiVersion: turtles-capi.cattle.io/v1alpha1
+kind: CAPIProvider
+metadata:
+ name: vsphere
+ namespace: capv-system
+spec:
+ name: vsphere
+ type: infrastructure
+ version: v1.11.2
+ configSecret:
+ name: vsphere-variables
+ fetchConfig:
+ selector:
+ matchLabels:
+ provider-components: vsphere
+ deployment:
+ containers:
+ - name: manager
+ imageUrl: "registry.suse.com/rancher/cluster-api-vsphere-controller:v1.11.2"
+ variables:
+ CLUSTER_TOPOLOGY: "true"
+ EXP_CLUSTER_RESOURCE_SET: "true"
+ EXP_MACHINE_POOL: "true"
+----
+
+Additionally, the *CAPIProvider* overrides the container image used by the provider via the `deployment.containers[].imageUrl` field. This allows the operator to pull the image from a registry within the air-gapped environment.
+
+=== Situation when manifests do not fit into ConfigMap
+
+There is a limit on the https://kubernetes.io/docs/concepts/configuration/configmap/#motivation[maximum size] of a ConfigMap - 1MiB. If the manifests do not fit into this size, Kubernetes will generate an error and the provider installation will fail. To avoid this, you can compress the manifests before putting them in the ConfigMap.
+
+For example, suppose you have two files: `components.yaml` and `metadata.yaml`. To create a working ConfigMap you need to:
+
+. Compress `components.yaml` using the `gzip` CLI tool
++
+[source,sh]
+----
+gzip -c components.yaml > components.gz
+----
++
+. Create a ConfigMap manifest from the archived data
++
+[source,sh]
+----
+kubectl create configmap v1.11.2 --namespace=capv-system --from-file=components=components.gz --from-file=metadata=metadata.yaml --dry-run=client -o yaml > configmap.yaml
+----
++
+. Edit the file by adding the `provider.cluster.x-k8s.io/compressed: "true"` annotation
++
+[source,sh]
+----
+yq eval -i '.metadata.annotations += {"provider.cluster.x-k8s.io/compressed": "true"}' configmap.yaml
+----
++
+NOTE: Without this annotation, the operator won't be able to determine whether the data is compressed or not.
+
+. Add labels that will be used to match the ConfigMap in the `fetchConfig` section of the provider
++
+[source,sh]
+----
+yq eval -i '.metadata.labels += {"my-label": "label-value"}' configmap.yaml
+----
++
+. Create the ConfigMap in your Kubernetes cluster using `kubectl`
++
+[source,sh]
+----
+kubectl create -f configmap.yaml
+----
diff --git a/docs/v0.15.0/modules/en/pages/getting-started/cluster-class/create_cluster.adoc b/docs/v0.15.0/modules/en/pages/getting-started/cluster-class/create_cluster.adoc
new file mode 100644
index 00000000..8f803adb
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/getting-started/cluster-class/create_cluster.adoc
@@ -0,0 +1,144 @@
+= Create a cluster using Fleet
+:sidebar_position: 2
+
+This section will guide you through creating a cluster that utilizes ClusterClass using a GitOps workflow with Fleet.
+
+[NOTE]
+====
+This guide uses the https://github.com/rancher-sandbox/rancher-turtles-fleet-example/tree/clusterclass[examples repository].
+====
+
+
+== Prerequisites
+
+* Rancher Manager cluster with {product_name} installed
+* Cluster API providers installed for your scenario - we'll be using the Docker infrastructure and Kubeadm bootstrap/control plane providers in these instructions - see https://cluster-api.sigs.k8s.io/user/quick-start.html#initialization-for-common-providers[Initialization for common providers]
+* The *ClusterClass* feature enabled - see xref:./intro.adoc[the introduction]
+
+== Configure Rancher Manager
+
+The clusterclass and cluster definitions will be imported into the Rancher Manager cluster (which is also acting as a Cluster API management cluster) using the *Continuous Delivery* feature (which uses Fleet).
+
+The guide will apply the manifests using a 2-step process. However, this isn't required and they could be combined into one step.
+
+There are 2 options to provide the configuration. The first is using the Rancher Manager UI and the second is by applying some YAML to your cluster. Both are covered below.
+
+=== Import ClusterClass Definitions
+
+[discrete]
+==== Using the Rancher Manager UI
+
+. Go to Rancher Manager
+. Select *Continuous Delivery* from the menu
+. Select *fleet-local* as the namespace from the top right
+. Select *Git Repos* from the sidebar
+. Click *Add Repository*
+. Enter *classes* as the name
+. Get the *HTTPS* clone URL from your git repo
+. Add the URL into the *Repository URL* field
+. Change the branch name to *clusterclass*
+. Click *Add Path*
+. Enter `/classes`
+. Click *Next*
+. Click *Create*
+. Click on the *classes* name
+. Watch the resources become ready
+
+=== Using kubectl
+
+. Get the *HTTPS* clone URL from your git repo
+. Create a new file called *repo.yaml*
+. Add the following contents to the new file:
++
+[source,yaml]
+----
+apiVersion: fleet.cattle.io/v1alpha1
+kind: GitRepo
+metadata:
+ name: classes
+ namespace: fleet-local
+spec:
+ branch: clusterclass
+ repo: https://github.com/rancher-sandbox/rancher-turtles-fleet-example.git
+ paths:
+ - /classes
+ targets: []
+----
++
+. Apply the file to the Rancher Manager cluster using *kubectl*:
++
+[source,bash]
+----
+kubectl apply -f repo.yaml
+----
++
+. Go to Rancher Manager
+. Select *Continuous Delivery* from the sidebar
+. Select *fleet-local* as the namespace from the top right
+. Select *Git Repos* from the sidebar
+. Click on the *classes* name
+. Watch the resources become ready
+. Select *Cluster Management* from the menu
+. Check your cluster has been imported
+
+=== Import Cluster Definitions
+
+Now that the classes have been imported, it's possible to use them with cluster definitions.
+
+[discrete]
+==== Using the Rancher Manager UI
+
+. Go to Rancher Manager
+. Select *Continuous Delivery* from the menu
+. Select *fleet-local* as the namespace from the top right
+. Select *Git Repos* from the sidebar
+. Click *Add Repository*
+. Enter *clusters* as the name
+. Get the *HTTPS* clone URL from your git repo
+. Add the URL into the *Repository URL* field
+. Change the branch name to *clusterclass*
+. Click *Add Path*
+. Enter `/clusters`
+. Click *Next*
+. Click *Create*
+. Click on the *clusters* name
+. Watch the resources become ready
+. Select *Cluster Management* from the menu
+. Check your cluster has been imported
+
+=== Using kubectl
+
+. Get the *HTTPS* clone URL from your git repo
+. Create a new file called *repo.yaml*
+. Add the following contents to the new file:
++
+[source,yaml]
+----
+apiVersion: fleet.cattle.io/v1alpha1
+kind: GitRepo
+metadata:
+ name: clusters
+ namespace: fleet-local
+spec:
+ branch: clusterclass
+ repo: https://github.com/rancher-sandbox/rancher-turtles-fleet-example.git
+ paths:
+ - /clusters
+ targets: []
+----
++
+. Apply the file to the Rancher Manager cluster using *kubectl*:
++
+[source,bash]
+----
+kubectl apply -f repo.yaml
+----
++
+. Go to Rancher Manager
+. Select *Continuous Delivery* from the sidebar
+. Select *fleet-local* as the namespace from the top right
+. Select *Git Repos* from the sidebar
+. Click on the *clusters* name
+. Watch the resources become ready
+. Select *Cluster Management* from the menu
+. Check your cluster has been imported
diff --git a/docs/v0.15.0/modules/en/pages/getting-started/cluster-class/intro.adoc b/docs/v0.15.0/modules/en/pages/getting-started/cluster-class/intro.adoc
new file mode 100644
index 00000000..681fbc3a
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/getting-started/cluster-class/intro.adoc
@@ -0,0 +1,41 @@
+= Introduction
+:sidebar_position: 1
+
+In this section we cover using *ClusterClass* with {product_name}.
+
+[CAUTION]
+====
+ClusterClass is an experimental feature of Cluster API. As with any experimental feature, it should be used with caution as it may be unreliable. Experimental features are not subject to any compatibility or deprecation promise. **It is planned to graduate with https://github.com/kubernetes-sigs/cluster-api/milestone/38[CAPI v1.9]**.
+====
+
+[TIP]
+====
+Before using ClusterClass, study the provider docs and confirm that the feature is supported. This documentation includes a matrix in the xref:../../reference-guides/providers/certified.adoc[Certified providers] section with xref:../../reference-guides/providers/certified.adoc#_clusterclass_support_for_certified_providers[cluster topology support status].
+====
+
+== Prerequisites
+
+To use ClusterClass it needs to be enabled for core Cluster API and any provider that supports it. This is done by setting the `CLUSTER_TOPOLOGY` variable to `true`.
+
+The {product_name} Helm chart will automatically enable the feature when installing CAPI. However, when enabling additional providers, ensure `CLUSTER_TOPOLOGY` is set in the provider configuration. Turtles' xref:../tasks/capi-operator/capiprovider_resource.adoc[CAPIProvider] resource supports passing installation parameters to the provider via `variables` as follows:
+
+[source,yaml]
+----
+apiVersion: turtles-capi.cattle.io/v1alpha1
+kind: CAPIProvider
+metadata:
+ name: azure
+ namespace: capz-system
+spec:
+ type: infrastructure
+ name: azure
+ configSecret:
+ name: azure-variables
+ variables:
+ CLUSTER_TOPOLOGY: "true"
+ EXP_CLUSTER_RESOURCE_SET: "true"
+ EXP_MACHINE_POOL: "true"
+ EXP_AKS_RESOURCE_HEALTH: "true"
+----
+
+The resource defined in this yaml file will install CAPZ with support for a number of features, including `CLUSTER_TOPOLOGY`.
diff --git a/docs/v0.15.0/modules/en/pages/getting-started/cluster-class/provision.adoc b/docs/v0.15.0/modules/en/pages/getting-started/cluster-class/provision.adoc
new file mode 100644
index 00000000..3505ddea
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/getting-started/cluster-class/provision.adoc
@@ -0,0 +1,109 @@
+= Create a workload cluster
+
+This section will guide you through creating a `ClusterClass` which you will then use to provision a workload `Cluster`. The Cluster API book includes a https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/[ClusterClass section] with detailed information on what is supported and how you can use the powerful abstraction this feature provides, including operations such as https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/write-clusterclass#clusterclass-with-patches[patching class values] and https://cluster-api.sigs.k8s.io/tasks/experimental-features/cluster-class/operate-cluster[operating a managed cluster]. We recommend you familiarize yourself with the feature to get the most out of it.
+
+[NOTE]
+====
+This guide uses the https://github.com/rancher-sandbox/rancher-turtles-fleet-example/tree/templates[examples repository].
+====
+
+== Providers Guide
+
+=== Prerequisites
+
+- Rancher Manager cluster with Rancher Turtles installed
+- Configure cloud credentials for Azure in Rancher: `Cluster Management` > `Cloud Credentials`.
+ - Keep the name you assign to the new set of credentials.
+- Install the https://github.com/kubernetes-sigs/cluster-api-provider-azure/[CAPI Infrastructure Provider for Azure] using the xref:../tasks/capi-operator/basic_cluster_api_provider_installation.adoc[`CAPIProvider` resource].
+
+[source,yaml]
+----
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: capz-system
+---
+apiVersion: turtles-capi.cattle.io/v1alpha1
+kind: CAPIProvider
+metadata:
+ name: azure
+ namespace: capz-system
+spec:
+ type: infrastructure
+ name: azure
+ credentials:
+ rancherCloudCredential: # Rancher credentials secret for Azure
+ configSecret:
+ name: azure-variables
+ variables:
+ CLUSTER_TOPOLOGY: "true"
+ EXP_CLUSTER_RESOURCE_SET: "true"
+ EXP_MACHINE_POOL: "true"
+ EXP_AKS_RESOURCE_HEALTH: "true"
+----
+
+=== Create ClusterClass object
+
+The `ClusterClass` object represents a template that defines the shape of the control plane and infrastructure of a cluster. It is the base definition for the `Cluster` object(s) that will be created from it. If the template is created with flexibility in mind, we can use it to provision workload clusters supporting variants of the same cluster shape, simplifying the configuration applied to each cluster, as the class absorbs most of the complexity.
+
+The template we're using in this example will use CAPZ to provision a managed Azure (AKS) cluster. Before applying the yaml file, you will need to export the following environment variables. Remember to adapt the values to your specific scenario as these are just placeholders:
+
+[source,bash]
+----
+export CLUSTER_CLASS_NAME="azure-sample"
+export CLUSTER_NAME="azure-aks-cluster"
+export AZURE_LOCATION="northeurope"
+export AZURE_SUBSCRIPTION_ID= # you can use: az account show --query 'id' --output tsv
+export KUBERNETES_VERSION="v1.30.4"
+export AZURE_CLIENT_ID=
+export AZURE_TENANT_ID=
+export AZURE_CLIENT_SECRET=
+----
+
+Use `envsubst` to substitute the exported variables in the original file:
+
+[source,bash]
+----
+curl -s https://raw.githubusercontent.com/rancher-sandbox/rancher-turtles-fleet-example/templates/capz/cluster-template-aks-clusterclass.yaml | envsubst >> clusterclass1.yaml
+----
+
+This will create a new yaml file `clusterclass1.yaml` that contains the class definition formatted with the exported values. You can study the resulting file before applying it to the cluster.
+
+[source,bash]
+----
+kubectl apply -f clusterclass1.yaml
+----
+
+You can validate that the class has been created successfully and inspect its content via `kubectl`:
+
+[source,bash]
+----
+kubectl get clusterclasses.cluster.x-k8s.io
+kubectl describe clusterclasses.cluster.x-k8s.io
+----
+
+=== Provision workload cluster
+
+Now that the class resource is available in the cluster, we can go ahead and create a cluster from this topology. Let's first substitute the variables in the template, as we did before:
+
+[source,bash]
+----
+curl -s https://raw.githubusercontent.com/rancher-sandbox/rancher-turtles-fleet-example/templates/capz/cluster-template-aks-topology.yaml | envsubst >> cluster1.yaml
+----
+
+This will create a new yaml file `cluster1.yaml` that contains the cluster definition formatted with the exported values. You can study the resulting file before applying it to the cluster, which will effectively trigger workload cluster creation.
+
+[source,bash]
+----
+kubectl apply -f cluster1.yaml
+----
+
+Be patient: cluster provisioning will take some time (up to 10 minutes). While you wait for the cluster to become ready, you can follow the logs of `capz-controller-manager`, the controller responsible for reconciling the cluster resources you just created.
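+
+For example, you could follow the controller logs and watch the cluster status with commands roughly like the following (the `capz-system` namespace and deployment name assume a default CAPZ installation; the cluster's namespace depends on your template):
+
+[source,bash]
+----
+# Follow the CAPZ controller logs
+kubectl logs -n capz-system deployment/capz-controller-manager -f
+
+# Watch clusters move through their provisioning phases
+kubectl get clusters.cluster.x-k8s.io -A -w
+----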
+
+== Enable auto-import into Rancher
+
+As with any other CAPI cluster, you will have to enable auto-import so that Turtles can import it into Rancher Manager. Please refer to the xref:../../getting-started/create-first-cluster/using_fleet.adoc#_mark_namespace_for_auto_import[Mark namespace for auto-import] notes to enable auto-import.
+
+== Post provisioning actions
+
+The functionality provided by cluster classes makes it possible to deploy as many clusters as desired from the topology you created. The template can be written flexibly enough to support variants of the same base cluster shape across many clusters.
\ No newline at end of file
diff --git a/docs/v0.15.0/modules/en/pages/getting-started/create-first-cluster/intro.adoc b/docs/v0.15.0/modules/en/pages/getting-started/create-first-cluster/intro.adoc
new file mode 100644
index 00000000..a806b5d8
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/getting-started/create-first-cluster/intro.adoc
@@ -0,0 +1,9 @@
+= Introduction
+:sidebar_position: 1
+
+Everything you need to know about creating and importing your first CAPI cluster with {product_name}.
+
+Choose one of the following options:
+
+* xref:getting-started/create-first-cluster/using_fleet.adoc[If you are using Fleet]
+* xref:getting-started/create-first-cluster/using_kubectl.adoc[If you want to use kubectl]
diff --git a/docs/v0.15.0/modules/en/pages/getting-started/create-first-cluster/using_fleet.adoc b/docs/v0.15.0/modules/en/pages/getting-started/create-first-cluster/using_fleet.adoc
new file mode 100644
index 00000000..f3e0ee42
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/getting-started/create-first-cluster/using_fleet.adoc
@@ -0,0 +1,151 @@
+= Create & import your first cluster using Fleet
+
+This section will guide you through creating your first cluster and importing it into Rancher Manager using a GitOps workflow with Fleet.
+
+== Prerequisites
+
+* Rancher Manager cluster with {product_name} installed
+* Cluster API providers installed for your scenario - we'll be using the https://github.com/kubernetes-sigs/cluster-api/tree/main/test/infrastructure/docker[Docker infrastructure] and https://github.com/rancher-sandbox/cluster-api-provider-rke2[RKE2 bootstrap/control plane] providers in these instructions - see xref:../../tasks/capi-operator/capiprovider_resource.adoc[Initialization for common providers using Turtles' `CAPIProvider`]
+
+== Create your cluster definition
+
+The **envsubst** tool can be used to generate the YAML for a cluster from a template by substituting environment variables.
+
+You can craft the YAML for your cluster manually. If you decide to do this then you can use the **templates** that infrastructure providers publish as part of their releases. For example, the AWS provider https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/tag/v2.2.1[publishes files] prefixed with **cluster-template** that can be used as a base. You will need to replace any tokens yourself or with a tool such as `envsubst` (e.g. `curl -s https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/download/v2.2.1/cluster-template-eks.yaml | envsubst > cluster.yaml`).
+
+[TIP]
+====
+To maintain proper resource management and avoid accidental deletion of custom resources managed outside of Helm during Helm operations, include the `helm.sh/resource-policy: keep` annotation in the top-level CAPI kinds within your cluster manifests.
+====
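+
+For illustration, adding the annotation to a top-level `Cluster` object could look like the sketch below (the name and namespace are placeholders):
+
+[source,yaml]
+----
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: cluster1
+  namespace: default
+  annotations:
+    helm.sh/resource-policy: keep
+spec:
+  # ... rest of the cluster definition ...
+----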
+
+
+[NOTE]
+====
+This guide does not use ClusterClass. Templates that use ClusterClass will require that the experimental feature be enabled.
+====
+
+
+To generate the YAML for the cluster do the following (assuming the Docker infrastructure provider is being used):
+
+. Open a terminal and run the following:
++
+[source,bash]
+----
+export CLUSTER_NAME=cluster1
+export CONTROL_PLANE_MACHINE_COUNT=1
+export WORKER_MACHINE_COUNT=1
+export KUBERNETES_VERSION=v1.30.0
+
+curl -s https://raw.githubusercontent.com/rancher-sandbox/rancher-turtles-fleet-example/templates/docker-rke2.yaml | envsubst > cluster1.yaml
+----
++
+. View *cluster1.yaml* to ensure there are no tokens. You can make any changes you want as well. A quick way to check for unreplaced tokens is shown after the tip below.
+
+[TIP]
+====
+The Cluster API quickstart guide contains more detail. Read the steps related to this section https://cluster-api.sigs.k8s.io/user/quick-start.html#required-configuration-for-common-providers[here].
+====
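+
+As a quick sanity check (an optional sketch, not part of the official flow), you can search the generated file for any variables that `envsubst` did not replace:
+
+[source,bash]
+----
+# Any output means there are still unreplaced ${...} tokens in the file
+grep -n '\${' cluster1.yaml
+----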
+
+
+== Create your repo for Fleet
+
+. Create a new git repository (this guide uses GitHub)
+. Create a new folder called *clusters*
+. Move the *cluster1.yaml* file you generated in the last section to the *clusters* folder.
+. Create a file called *fleet.yaml* in the root and add the following contents
++
+[source,yaml]
+----
+namespace: default
+----
++
+. Commit the changes
+
+[NOTE]
+====
+The *fleet.yaml* file is used to specify configuration options for Fleet (see the https://fleet.rancher.io/ref-fleet-yaml[docs] for further details). In this instance, it declares that the cluster definitions should be added to the *default* namespace.
+====
+
+
+After the described steps, you will have a repository with a structure similar to the https://github.com/rancher-sandbox/rancher-turtles-fleet-example[example repository].
+
+== Mark Namespace for auto-import
+
+To automatically import a CAPI cluster into Rancher Manager there are 2 options:
+
+. label a namespace so all clusters contained in it are imported.
+. label an individual cluster definition so that it's imported.
+
+In both cases the label is `cluster-api.cattle.io/rancher-auto-import`.
+
+This walkthrough will use the first option of importing all clusters in a namespace; a command for the second option is also shown below for reference.
+
+. Open a terminal
+. Label the default namespace in your Rancher Manager cluster:
++
+[source,bash]
+----
+kubectl label namespace default cluster-api.cattle.io/rancher-auto-import=true
+----
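+
+If you prefer the second option instead, labeling an individual cluster definition would look like this (assuming a cluster named `cluster1` in the current namespace):
+
+[source,bash]
+----
+kubectl label cluster.cluster.x-k8s.io cluster1 cluster-api.cattle.io/rancher-auto-import=true
+----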
+
+== Configure Rancher Manager
+
+Now that the cluster definitions are committed to a git repository, they can be used to provision the clusters. To do this, they will need to be imported into the Rancher Manager cluster (which is also acting as a Cluster API management cluster) using the *Continuous Delivery* feature (which uses Fleet).
+
+There are 2 options to provide the configuration. The first is using the Rancher Manager UI and the second is by applying some YAML to your cluster. Both are covered below.
+
+=== Using the Rancher Manager UI
+
+. Go to Rancher Manager
+. Select *Continuous Delivery* from the menu:
+image:sidebar.png[sidebar]
+. Select *fleet-local* as the namespace from the top right
+image:ns.png[namespace]
+. Select *Git Repos* from the sidebar
+. Click *Add Repository*
+. Enter *clusters* as the name
+. Get the *HTTPS* clone URL from your git repo
+image:gh_clone.png[git clone url]
+. Add the URL into the *Repository URL* field
+. Change the branch name to *main*
+. Click *Next*
+. Click *Create*
+. Click on the *clusters* name
+. Watch the resources become ready
+. Select *Cluster Management* from the menu
+. Check your cluster has been imported
+
+=== Using kubectl
+
+. Get the *HTTPS* clone URL from your git repo
+. Create a new file called *repo.yaml*
+. Add the following contents to the new file:
++
+[source,yaml]
+----
+apiVersion: fleet.cattle.io/v1alpha1
+kind: GitRepo
+metadata:
+ name: clusters
+ namespace: fleet-local
+spec:
+ branch: main
+ repo:
+ targets: []
+----
++
+. Apply the file to the Rancher Manager cluster using *kubectl*:
++
+[source,bash]
+----
+kubectl apply -f repo.yaml
+----
++
+. Go to Rancher Manager
+. Select *Continuous Delivery* from the sidebar
+. Select *fleet-local* as the namespace from the top right
+. Select *Git Repos* from the sidebar
+. Click on the *clusters* name
+. Watch the resources become ready
+. Select *Cluster Management* from the menu
+. Check your cluster has been imported
diff --git a/docs/v0.15.0/modules/en/pages/getting-started/create-first-cluster/using_kubectl.adoc b/docs/v0.15.0/modules/en/pages/getting-started/create-first-cluster/using_kubectl.adoc
new file mode 100644
index 00000000..1cbcac9c
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/getting-started/create-first-cluster/using_kubectl.adoc
@@ -0,0 +1,60 @@
+= Create & Import Your First Cluster Using kubectl
+:sidebar_position: 3
+
+This section will guide you through creating your first cluster and importing it into Rancher Manager using kubectl.
+
+== Prerequisites
+
+* Rancher Manager cluster with {product_name} installed
+* Cluster API providers installed for your scenario - we'll be using the https://github.com/kubernetes-sigs/cluster-api/tree/main/test/infrastructure/docker[Docker infrastructure] and https://github.com/rancher-sandbox/cluster-api-provider-rke2[RKE2 bootstrap/control plane] providers in these instructions - see xref:../../tasks/capi-operator/capiprovider_resource.adoc[Initialization for common providers using Turtles' `CAPIProvider`]
+* *clusterctl* CLI - see the https://github.com/kubernetes-sigs/cluster-api/releases[releases]
+
+== Create Your Cluster Definition
+
+To generate the YAML for the cluster, do the following (assuming the Docker infrastructure provider is being used):
+
+. Open a terminal and run the following:
++
+[source,bash]
+----
+export CLUSTER_NAME=cluster1
+export CONTROL_PLANE_MACHINE_COUNT=1
+export WORKER_MACHINE_COUNT=1
+export KUBERNETES_VERSION=v1.30.0
+
+curl -s https://raw.githubusercontent.com/rancher-sandbox/rancher-turtles-fleet-example/templates/docker-rke2.yaml | envsubst > cluster1.yaml
+----
++
+. View *cluster1.yaml* to ensure there are no tokens. You can make any changes you want as well.
++
+____
+The Cluster API quickstart guide contains more detail. Read the steps related to this section https://cluster-api.sigs.k8s.io/user/quick-start.html#required-configuration-for-common-providers[here].
+____
++
+. Create the cluster using kubectl
++
+[source,bash]
+----
+kubectl create -f cluster1.yaml
+----
+
+== Mark Namespace or Cluster for Auto-Import
+
+To automatically import a CAPI cluster into Rancher Manager, there are 2 options:
+
+. Label a namespace so all clusters contained in it are imported.
+. Label an individual cluster definition so that it's imported.
+
+Labeling a namespace:
+
+[source,bash]
+----
+kubectl label namespace default cluster-api.cattle.io/rancher-auto-import=true
+----
+
+Labeling an individual cluster definition:
+
+[source,bash]
+----
+kubectl label cluster.cluster.x-k8s.io cluster1 cluster-api.cattle.io/rancher-auto-import=true
+----
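+
+After the label is applied, you can watch the cluster being provisioned and imported. The following is a sketch of typical checks, where names assume the defaults used above:
+
+[source,bash]
+----
+# Watch the CAPI cluster until the control plane and infrastructure are ready
+kubectl get clusters.cluster.x-k8s.io -n default cluster1 -w
+
+# Or get a detailed, tree-style view of the cluster's conditions
+clusterctl describe cluster cluster1 -n default
+----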
diff --git a/docs/v0.15.0/modules/en/pages/getting-started/install-rancher-turtles/using_helm.adoc b/docs/v0.15.0/modules/en/pages/getting-started/install-rancher-turtles/using_helm.adoc
new file mode 100644
index 00000000..b36ffc7e
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/getting-started/install-rancher-turtles/using_helm.adoc
@@ -0,0 +1,100 @@
+= Via Helm install
+
+[CAUTION]
+====
+In case you need to review the list of prerequisites (including `cert-manager`), you can refer to xref:../index.adoc#_prerequisites[this table].
+====
+
+
+Use this method if you want to manually apply the Helm chart and be in full control of the installation.
+
+The Cluster API Operator is required for installing {product_name} and will be installed as a dependency of the {product_name} Helm chart.
+
+The CAPI Operator handles the lifecycle of Cluster API providers using a declarative approach, extending the capabilities of `clusterctl`. If you want to learn more about it, you can refer to the https://cluster-api-operator.sigs.k8s.io/[Cluster API Operator book].
+
+[IMPORTANT]
+====
+Before <<_install_suse_rancher_prime_cluster_api_with_cluster_api_operator_as_a_helm_dependency,installing {product_name}>> in your Rancher environment, Rancher's `embedded-cluster-api` functionality must be disabled. This also includes cleaning up Rancher-specific webhooks that would otherwise conflict with CAPI ones.
+
+To simplify setting up Rancher for installing {product_name}, the official {product_name} Helm chart includes a `pre-install` hook that applies these changes, making it transparent to the end user:
+
+* Disable the `embedded-cluster-api` feature in Rancher.
+* Delete the `mutating-webhook-configuration` and `validating-webhook-configuration` webhooks that are no longer needed.
+====
+
+
+If you would like to understand how {product_name} works and what the architecture looks like, you can refer to the xref:../../reference-guides/architecture/intro.adoc[Architecture] section.
+
+[NOTE]
+====
+If uninstalling, you can refer to xref:../uninstall_turtles.adoc[Uninstalling {product_name}].
+====
+
+
+== Install {product_name} with `Cluster API Operator` as a Helm dependency
+
+The `rancher-turtles` chart is available at https://rancher.github.io/turtles and this Helm repository must be added before proceeding with the installation:
+
+[source,bash]
+----
+helm repo add turtles https://rancher.github.io/turtles
+helm repo update
+----
+
+As mentioned before, installing {product_name} requires the https://github.com/kubernetes-sigs/cluster-api-operator[Cluster API Operator] and the Helm chart can handle its installation automatically with a minimum set of flags:
+
+[source,bash]
+----
+helm install rancher-turtles turtles/rancher-turtles --version v0.13.0 \
+ -n rancher-turtles-system \
+ --dependency-update \
+ --create-namespace --wait \
+ --timeout 180s
+----
+
+This operation may take a few minutes. After installation, you can take some time to study the installed controllers, including:
+
+* `rancher-turtles-controller`.
+* `capi-operator`.
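+
+A quick way to verify that these controllers are up, as a sketch assuming the default namespaces:
+
+[source,bash]
+----
+kubectl -n rancher-turtles-system get deployments
+kubectl -n capi-system get deployments
+----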
+
+[NOTE]
+====
+
+* For a list of {product_name} versions, refer to https://github.com/rancher/turtles/releases[Releases page].
+====
+
+
+This is the basic, recommended configuration, which manages the creation of a secret containing the required CAPI feature flags (`CLUSTER_TOPOLOGY`, `EXP_CLUSTER_RESOURCE_SET` and `EXP_MACHINE_POOL` enabled) in the core provider namespace. These feature flags are required to enable additional Cluster API functionality.
+
+If you need to override the default behavior and use an existing secret (or add custom environment variables), you can pass the secret name to Helm via a flag. In this case, as a user, you are in charge of managing the secret creation and its content, including the minimum required feature flags: `CLUSTER_TOPOLOGY`, `EXP_CLUSTER_RESOURCE_SET` and `EXP_MACHINE_POOL` enabled.
+
+[source,bash]
+----
+# Pass the name of a user-managed secret for additional environment variables
+helm install ... \
+  --set cluster-api-operator.cluster-api.configSecret.name=<secret_name>
+----
+
+The following is an example of a user-managed secret `cluster-api-operator.cluster-api.configSecret.name=variables` with `CLUSTER_TOPOLOGY`, `EXP_CLUSTER_RESOURCE_SET` and `EXP_MACHINE_POOL` feature flags set and an extra custom variable:
+
+.secret.yaml
+[source, yaml]
+----
+apiVersion: v1
+kind: Secret
+metadata:
+ name: variables
+ namespace: rancher-turtles-system
+type: Opaque
+stringData:
+ CLUSTER_TOPOLOGY: "true"
+ EXP_CLUSTER_RESOURCE_SET: "true"
+ EXP_MACHINE_POOL: "true"
+ CUSTOM_ENV_VAR: "false"
+----
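+
+Putting it together, the following is a sketch of creating the secret first and then pointing the chart at it, reusing the install flags shown earlier:
+
+[source,bash]
+----
+# The namespace must exist before the secret can be created in it
+kubectl create namespace rancher-turtles-system --dry-run=client -o yaml | kubectl apply -f -
+kubectl apply -f secret.yaml
+
+helm install rancher-turtles turtles/rancher-turtles --version v0.13.0 \
+  -n rancher-turtles-system \
+  --dependency-update \
+  --create-namespace --wait \
+  --timeout 180s \
+  --set cluster-api-operator.cluster-api.configSecret.name=variables
+----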
+
+[IMPORTANT]
+====
+For detailed information on the values supported by the chart and their usage, refer to xref:../../reference-guides/rancher-turtles-chart/values.adoc[Helm chart options].
+====
+
diff --git a/docs/v0.15.0/modules/en/pages/getting-started/install-rancher-turtles/using_rancher_dashboard.adoc b/docs/v0.15.0/modules/en/pages/getting-started/install-rancher-turtles/using_rancher_dashboard.adoc
new file mode 100644
index 00000000..b817ff8e
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/getting-started/install-rancher-turtles/using_rancher_dashboard.adoc
@@ -0,0 +1,65 @@
+= Via Rancher Dashboard
+
+
+This is the recommended option for installing {product_name}.
+
+Via the Rancher UI, just by adding the Turtles repository, you can let Rancher take care of installing and configuring the Cluster API extension.
+
+[CAUTION]
+====
+In case you need to review the list of prerequisites (including `cert-manager`), you can refer to xref:../index.adoc#_prerequisites[this table].
+====
+
+
+[IMPORTANT]
+====
+Before xref:./using_helm.adoc#_install_suse_rancher_prime_cluster_api_with_cluster_api_operator_as_a_helm_dependency[installing {product_name}] in your Rancher environment, Rancher's `embedded-cluster-api` functionality must be disabled. This also includes cleaning up Rancher-specific webhooks that would otherwise conflict with CAPI ones.
+
+To simplify setting up Rancher for installing {product_name}, the official {product_name} Helm chart includes a `pre-install` hook that applies these changes, making it transparent to the end user:
+
+* Disable the `embedded-cluster-api` feature in Rancher.
+* Delete the `mutating-webhook-configuration` and `validating-webhook-configuration` webhooks that are no longer needed.
+====
+
+
+If you would like to understand how {product_name} works and what the architecture looks like, you can refer to the xref:../../reference-guides/architecture/intro.adoc[Architecture] section.
+
+[NOTE]
+====
+If uninstalling, you can refer to xref:../uninstall_turtles.adoc[Uninstalling {product_name}].
+====
+
+
+== Installation
+
+* From your browser, access Rancher Manager and explore the *local* cluster.
+* Using the left navigation panel, go to `Apps` \-> `Repositories`.
+* Click `Create` to add a new repository.
+* Enter the following:
+ ** *Name*: `turtles`.
+ ** *Index URL*: https://rancher.github.io/turtles.
+* Wait for the `turtles` repository to have a status of `Active`.
+* Go to `Apps` \-> `Charts`.
+* Filter for `turtles`.
+* Click `Rancher Turtles - the Cluster API Extension`
+* Click `Install` \-> `Next` \-> `Install`.
+
+[CAUTION]
+====
+By default, Rancher will not install Turtles into a https://ranchermanager.docs.rancher.com/how-to-guides/new-user-guides/manage-clusters/projects-and-namespaces[Project]. Installing Turtles into a Project is not supported, and the default configuration `None` should be used to avoid unexpected behavior during installation.
+====
+
+
+image::install-turtles-from-ui.gif[install-turtles-from-ui]
+
+This will use the default values for the Helm chart, which are good for most installations. If your configuration requires overriding some of these defaults, you can either specify the values during installation from the Rancher UI or, alternatively, you can opt for the xref:./using_helm.adoc[manual installation via Helm]. And, if you are interested in learning more about the available values, you can check the xref:../../reference-guides/rancher-turtles-chart/values.adoc[reference guide].
+
+The installation may take a few minutes and, when it finishes, you will be able to see the following new deployments in the cluster:
+
+* `rancher-turtles-system/rancher-turtles-controller-manager`
+* `rancher-turtles-system/rancher-turtles-cluster-api-operator`
+* `capi-system/capi-controller-manager`
+* `rke2-bootstrap-system/rke2-bootstrap-controller-manager`
+* `rke2-control-plane-system/rke2-control-plane-controller-manager`
+
+image::deployments-turtles.png[deployments-turtles]
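+
+If you prefer the command line, you can confirm the same deployments with `kubectl`. A sketch assuming the default namespaces listed above:
+
+[source,bash]
+----
+kubectl get deployments -n rancher-turtles-system
+kubectl get deployments -n capi-system
+kubectl get deployments -n rke2-bootstrap-system
+kubectl get deployments -n rke2-control-plane-system
+----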
diff --git a/docs/v0.15.0/modules/en/pages/getting-started/rancher.adoc b/docs/v0.15.0/modules/en/pages/getting-started/rancher.adoc
new file mode 100644
index 00000000..144df898
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/getting-started/rancher.adoc
@@ -0,0 +1,30 @@
+= Rancher Setup
+:sidebar_position: 2
+
+== Installing Rancher
+
+_If you're already running Rancher, you can skip this section and jump to xref:./install-rancher-turtles/using_rancher_dashboard.adoc[Install {product_name}]._
+
+Helm is the recommended way to install `Rancher` in an existing or new Kubernetes cluster.
+
+[TIP]
+====
+Make sure to follow one of the official https://ranchermanager.docs.rancher.com/pages-for-subheaders/installation-and-upgrade[installation guides] for Rancher.
+====
+
+
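+If the `Rancher` Helm repository has not been added yet, a typical setup looks like this (assuming the `rancher-stable` channel):
+
+[source,bash]
+----
+helm repo add rancher-stable https://releases.rancher.com/server-charts/stable
+helm repo update
+----
+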
+Here's a minimal configuration example of a command to install `Rancher`:
+
+[source,bash]
+----
+helm install rancher rancher-stable/rancher \
+  --namespace cattle-system \
+  --create-namespace \
+  --set hostname=<hostname> \
+  --version <version> \
+  --wait
+----
+
+Replace `<hostname>` with the actual hostname of your `Rancher` server and replace `<version>` with the version of `Rancher` you want to install. In this case, use the xref:../index.adoc#_prerequisites[recommended] `Rancher` version for `{product_name}`.
+
+You are now ready to install and use {product_name}! 🎉
diff --git a/docs/v0.15.0/modules/en/pages/getting-started/uninstall_turtles.adoc b/docs/v0.15.0/modules/en/pages/getting-started/uninstall_turtles.adoc
new file mode 100644
index 00000000..8bbf03f5
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/getting-started/uninstall_turtles.adoc
@@ -0,0 +1,52 @@
+= Uninstall {product_name}
+:sidebar_position: 5
+
+This section gives an overview of the {product_name} uninstallation process.
+
+[CAUTION]
+====
+When installing {product_name} in your Rancher environment, by default, {product_name} enables the Cluster API Operator cleanup. This includes cleaning up Cluster API Operator-specific webhooks and deployments that otherwise cause issues with Rancher provisioning.
+
+To simplify uninstalling {product_name} (via Rancher Manager or helm command), the official {product_name} Helm chart includes a `post-delete` hook that applies these changes, making it transparent to the end user:
+
+* Delete the `mutating-webhook-configuration` and `validating-webhook-configuration` webhooks that are no longer needed.
+* Delete the CAPI `deployments` that are no longer needed.
+====
+
+
+To uninstall the {product_name} Extension, use the following `helm` command:
+
+[source,bash]
+----
+helm uninstall -n rancher-turtles-system rancher-turtles --cascade foreground --wait
+----
+
+This may take a few minutes to complete.
+
+[NOTE]
+====
+Remember that, if you use a different name for the installation or a different namespace, you may need to customize the command for your specific configuration.
+====
+
+
+Once uninstalled, Rancher's `embedded-cluster-api` feature must be re-enabled:
+
+. Create a `feature.yaml` file, with `embedded-cluster-api` set to true:
++
+.feature.yaml
+[source,yaml]
+----
+apiVersion: management.cattle.io/v3
+kind: Feature
+metadata:
+ name: embedded-cluster-api
+spec:
+ value: true
+----
++
+. Use `kubectl` to apply the `feature.yaml` file to the cluster:
++
+[source,bash]
+----
+kubectl apply -f feature.yaml
+----
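+
+To confirm the feature is enabled again, a quick check might look like this:
+
+[source,bash]
+----
+kubectl get features.management.cattle.io embedded-cluster-api -o yaml
+----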
diff --git a/docs/v0.15.0/modules/en/pages/index.adoc b/docs/v0.15.0/modules/en/pages/index.adoc
new file mode 100644
index 00000000..3af51403
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/index.adoc
@@ -0,0 +1,80 @@
+= Introduction
+:page_project_origin: /getting-started/intro.md
+:page_project_slug: /
+
+[WARNING]
+====
+Starting with Turtles `v0.9.0`, the process used for importing CAPI clusters into Rancher is now based on a different controller logic. If you are a new user of Turtles, you can proceed normally and simply install the extension. If you have been using previous versions of Turtles and are upgrading to `v0.9.0`, we recommend you take a look at the migration mechanisms and their implications:
+
+* xref:./tasks/maintenance/automigrate_to_v3_import.adoc[Automatic migration].
+* xref:./tasks/maintenance/import_controller_upgrade.adoc[Manual migration].
+====
+
+
+{product_name} is a Kubernetes Operator that provides integration between Rancher Manager and Cluster API (CAPI) with the aim of bringing full CAPI support to Rancher. With {product_name}, you can:
+
+* Automatically import CAPI clusters into Rancher, by installing the Rancher Cluster Agent in CAPI provisioned clusters.
+* Configure the CAPI Operator.
+
+== Demo
+
+This demo shows how to use the Rancher UI to install {product_name}, create/import a CAPI cluster, and install monitoring on the cluster:
+
++++VIDEO +++
+
+== Prerequisites
+
+|===
+| Name | Version | Details
+
+| Kubernetes cluster
+| `>=1.30.0`
+|
+
+| Helm
+| `>=3.12.0`
+|
+
+| Rancher
+| `>=2.9.0`
+| Using a https://ranchermanager.docs.rancher.com/pages-for-subheaders/install-upgrade-on-a-kubernetes-cluster#install-the-rancher-helm-chart[Helm-based] installation on any Kubernetes cluster directly, or on a newly created https://ranchermanager.docs.rancher.com/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-amazon-eks[Amazon], https://ranchermanager.docs.rancher.com/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-aks[Azure] or https://ranchermanager.docs.rancher.com/getting-started/installation-and-upgrade/install-upgrade-on-a-kubernetes-cluster/rancher-on-gke[Google] managed Kubernetes service.
+
+| Cert-manager
+| `>=v1.15.2`
+| Using a https://cert-manager.io/docs/installation/helm/#installing-with-helm[Helm-based] installation or via https://cert-manager.io/docs/installation/#default-static-install[kubectl apply].
+
+| Cluster API Operator
+| `>=v0.14.0`
+| Using xref:./getting-started/install-rancher-turtles/using_rancher_dashboard.adoc[Rancher UI] (recommended) or https://github.com/kubernetes-sigs/cluster-api-operator/blob/main/docs/README.md#method-2-use-helm-charts[Helm install] (for development use cases)
+
+| Cluster API
+| `v1.7.7`
+|
+
+| {product_name}
+| `>v0.13.0`
+| Using xref:./getting-started/install-rancher-turtles/using_rancher_dashboard.adoc[Rancher UI] (recommended) or xref:./getting-started/install-rancher-turtles/using_helm.adoc[Helm install] (for advanced use cases)
+|===
+
+== Reference Guides
+
+This section focuses on implementation details including
+xref:./reference-guides/architecture/intro.adoc[architecture], how {product_name} integrates with Rancher, and xref:./reference-guides/rancher-turtles-chart/values.adoc[Helm Chart configuration values].
+
+== Tasks
+
+In this section we cover additional xref:./tasks/intro.adoc[operational tasks] including basic `CAPIProvider` xref:./tasks/capi-operator/basic_cluster_api_provider_installation.adoc[installation], an xref:./tasks/capi-operator/add_infrastructure_provider.adoc[example] AWS infrastructure provider install using `CAPIProvider`, and xref:./tasks/maintenance/early_adopter_upgrade.adoc[upgrade instructions] for early adopters of {product_name}.
+
+== Developer Guide
+
+This section describes xref:./developer-guide/contributing_guidelines.adoc[how to get involved] in the development of Rancher Turtles as well as xref:./developer-guide/development.adoc[how to setup a local development environment], if you wish to do so.
+
+== Reference
+
+This section has a useful xref:./reference/glossary.adoc[glossary] to help you navigate Rancher and Cluster API concepts.
+
+== Security
+
+{product_name} meets https://slsa.dev/spec/v1.0/levels#build-l3[SLSA Level 3] requirements with an appropriately hardened build platform, consistent build processes, and provenance distribution. This section contains more information on security-related topics:
+
+* xref:./security/slsa.adoc[SLSA]
diff --git a/docs/v0.15.0/modules/en/pages/reference-guides/architecture/components.adoc b/docs/v0.15.0/modules/en/pages/reference-guides/architecture/components.adoc
new file mode 100644
index 00000000..7790ecb6
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/reference-guides/architecture/components.adoc
@@ -0,0 +1,30 @@
+= Components
+:sidebar_position: 0
+
+Below is a visual representation of the architecture components of Rancher
+Turtles. This diagram illustrates the key elements and their relationships
+within the {product_name} system. Understanding these components is essential
+for gaining insights into how Rancher leverages Cluster API (CAPI) for cluster
+management.
+
+image::30000ft_view.png[overview]
+
+== Rancher Manager
+
+This is the core component of Rancher, and users can leverage the existing
+Explorer feature in the dashboard to access cluster workload details.
+
+== Rancher Cluster Agent
+
+The agent is deployed within child clusters, enabling Rancher to import and
+establish a connection with these clusters. This connection allows Rancher to
+manage the child clusters effectively from within its platform.
+
+== {product_name} - Rancher CAPI Extension
+
+It provides integration between CAPI and Rancher while currently supporting the
+following functionalities:
+
+* *Importing CAPI clusters into Rancher:* installing Rancher Cluster Agent in
+CAPI provisioned clusters.
+* *CAPI Operator Configuration:* Configuration support for the CAPI Operator.
diff --git a/docs/v0.15.0/modules/en/pages/reference-guides/architecture/deployment.adoc b/docs/v0.15.0/modules/en/pages/reference-guides/architecture/deployment.adoc
new file mode 100644
index 00000000..e0b70390
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/reference-guides/architecture/deployment.adoc
@@ -0,0 +1,23 @@
+= Deployment Scenarios
+:sidebar_position: 0
+
+[NOTE]
+====
+Currently {product_name} only supports having Rancher Manager and
+{product_name} running in the same cluster. A topology with a separate Rancher
+Manager cluster and one/multiple CAPI management cluster/s will be supported in
+future releases.
+====
+
+
+== Rancher Manager & CAPI Management Combined
+
+In this topology, both Rancher Manager and {product_name} are deployed to the
+same Kubernetes cluster, and it acts as a centralized management cluster.
+
+image::in_cluster_topology.png[Rancher Manager & CAPI Management Combined]
+
+This architecture offers a simplified deployment of components and provides a
+single view of all clusters. On the flip side, it's important to consider that
+the number of clusters that can be managed effectively by Cluster API (CAPI) is
+limited by the resources available within the single management cluster.
diff --git a/docs/v0.15.0/modules/en/pages/reference-guides/architecture/intro.adoc b/docs/v0.15.0/modules/en/pages/reference-guides/architecture/intro.adoc
new file mode 100644
index 00000000..b8e11e80
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/reference-guides/architecture/intro.adoc
@@ -0,0 +1,20 @@
+= Introduction
+:sidebar_position: 0
+
+This guide offers a comprehensive overview of the core components and structure
+that power {product_name} and its integration within the Rancher ecosystem.
+
+[TIP]
+====
+For guidance about setting up Rancher, refer to
+xref:../../getting-started/rancher.adoc[Rancher Setup]
+
+For information on how to install {product_name}, refer to
+xref:../../getting-started/install-rancher-turtles/using_rancher_dashboard.adoc[Install {product_name} using Rancher Dashboard]
+====
+
+
+*A Rancher User will use Rancher to manage clusters. Rancher will be able to use
+Cluster API to manage the lifecycle of child Kubernetes clusters.*
+
+image::intro.png[intro]
diff --git a/docs/v0.15.0/modules/en/pages/reference-guides/providers/addon-provider-fleet.adoc b/docs/v0.15.0/modules/en/pages/reference-guides/providers/addon-provider-fleet.adoc
new file mode 100644
index 00000000..97101640
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/reference-guides/providers/addon-provider-fleet.adoc
@@ -0,0 +1,38 @@
+= Cluster API Addon Provider Fleet
+
+== Overview
+
+Cluster API Add-on Provider for `Fleet` (CAAPF) is a Cluster API (CAPI) provider that provides integration with https://fleet.rancher.io/[`Fleet`] to enable the easy deployment of applications to a CAPI provisioned cluster.
+
+== Functionality
+
+* The provider will register a newly provisioned CAPI cluster with `Fleet` by creating a `Fleet` `Cluster` instance with the same `name` and `namespace`. Applications can be automatically deployed to the created cluster using `GitOps`.
+* The provider will automatically create a Fleet `ClusterGroup` for every CAPI `ClusterClass` in the `ClusterClass` namespace. This enables you to deploy the same applications to all clusters created from the same `ClusterClass`.
+
+This allows a user to specify either a https://fleet.rancher.io/ref-bundle[`Bundle`] resource with raw application workloads, or a https://fleet.rancher.io/ref-gitrepo[`GitRepo`] to install applications from Git. Each of these resources can provide https://fleet.rancher.io/gitrepo-targets#defining-targets[`targets`] with any combination of:
+
+[source,yaml]
+----
+ targets:
+ - clusterGroup: # If the cluster is created from cluster-class
+ - clusterName:
+----
+
+Additionally, `CAAPF` automatically propagates `CAPI` cluster labels to the `Fleet` cluster resource, so a user can specify a target matching a common cluster label with:
+
+[source,yaml]
+----
+ targets:
+ - clusterSelector:
+ - clusterGroupSelector:
+----
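+
+For illustration, the following is a hedged sketch of a Fleet `GitRepo` that targets a single CAPI-provisioned cluster by name; the repository URL and cluster name are placeholders, and the `GitRepo` is created in the same namespace as the CAPI cluster:
+
+[source,bash]
+----
+kubectl apply -f - <<EOF
+apiVersion: fleet.cattle.io/v1alpha1
+kind: GitRepo
+metadata:
+  name: sample-apps
+  namespace: default
+spec:
+  repo: https://github.com/example/sample-apps # placeholder repository with application manifests
+  branch: main
+  targets:
+    - clusterName: cluster1 # name of the CAPI cluster registered by CAAPF
+EOF
+----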
+
+== Example - deploying kindnet CNI
+
+[CAUTION]
+====
+The following example requires `Fleet` version `>=v0.10.1-rc.1`, which is not a part of `rancher/charts` yet.
+====
+
+
+*Demo*: image:https://asciinema.org/a/seEFHKz5DVpUe5CQvWcddSJBp.svg[asciicast,link=https://asciinema.org/a/seEFHKz5DVpUe5CQvWcddSJBp]
diff --git a/docs/v0.15.0/modules/en/pages/reference-guides/providers/certified.adoc b/docs/v0.15.0/modules/en/pages/reference-guides/providers/certified.adoc
new file mode 100644
index 00000000..7134c409
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/reference-guides/providers/certified.adoc
@@ -0,0 +1,95 @@
+= Certified CAPI Providers
+
+Remember that most Cluster API Providers are upstream projects maintained by the Kubernetes open-source community.
+
+== List of certified providers
+
+[NOTE]
+====
+This list is constantly evolving to reflect the ongoing development of the project.
+====
+
+
+This is a list of the CAPI providers officially certified by Turtles. These providers are covered by our test suite, and we actively ensure that they work properly with the CAPI extension for Rancher.
+
+|===
+| Platform | Code Name | Provider Type | Docs
+
+| *RKE2*
+| CAPRKE2
+| Bootstrap/Control Plane
+| https://rancher.github.io/cluster-api-provider-rke2
+
+| *Kubeadm*
+| Kubeadm
+| Bootstrap/Control Plane
+| https://cluster-api.sigs.k8s.io/tasks/bootstrap/kubeadm-bootstrap
+
+| *AWS*
+| CAPA
+| Infrastructure
+| https://cluster-api-aws.sigs.k8s.io
+
+| *Docker**
+| CAPD
+| Infrastructure
+| https://cluster-api.sigs.k8s.io
+
+| *vSphere*
+| CAPV
+| Infrastructure
+| https://github.com/kubernetes-sigs/cluster-api-provider-vsphere
+
+| *Azure* (Only AKS managed clusters)
+| CAPZ
+| Infrastructure
+| https://capz.sigs.k8s.io/
+
+| *Addon Provider Fleet*
+| CAAPF
+| Addon
+| https://github.com/rancher-sandbox/cluster-api-addon-provider-fleet
+|===
+
+*Recommended only for development purposes.
+
+== List of providers in experimental mode
+
+This is a list of providers that are in an advanced state of development and will soon become certified.
+
+|===
+| Platform | Code Name | Provider Type | Docs
+|===
+
+== ClusterClass Support for Certified Providers
+
+The following is a support matrix for each certified provider and their support of the cluster topology feature:
+
+[tabs]
+======
+CAPZ::
++
+- **Full support** of `ClusterClass`: both managed (AKS) and unmanaged (virtual machines) clusters can be provisioned via topology.
+
+CAPA::
++
+- **Supports** `ClusterClass` when provisioning unmanaged (EC2-based) clusters.
+- **Does not support** `ClusterClass` when provisioning managed (EKS) clusters: this is a work-in-progress.
+
+CAPRKE2::
++
+- **Full support** of `ClusterClass`.
+
+CABPK::
++
+- **Full support** of `ClusterClass`.
+
+CAPV::
++
+- **Full support** of `ClusterClass`.
+
+CAPD::
++
+- **Full support** of `ClusterClass`.
+
+======
\ No newline at end of file
diff --git a/docs/v0.15.0/modules/en/pages/reference-guides/providers/howto.adoc b/docs/v0.15.0/modules/en/pages/reference-guides/providers/howto.adoc
new file mode 100644
index 00000000..752ede87
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/reference-guides/providers/howto.adoc
@@ -0,0 +1,166 @@
+:doctype: book
+
+= Create & import a cluster using CAPI providers
+
+This guide goes over the process of creating and importing CAPI clusters with a selection of the officially certified providers.
+
+Keep in mind that most Cluster API Providers are upstream projects maintained by the Kubernetes open-source community.
+
+== Prerequisites
+
+[tabs]
+======
+AWS RKE2::
++
+* Rancher Manager cluster with {product_name} installed
+* Cluster API Providers: you can find a guide on how to install a provider using the `CAPIProvider` resource xref:../../tasks/capi-operator/basic_cluster_api_provider_installation.adoc[here]
+** https://github.com/kubernetes-sigs/cluster-api-provider-aws/[Infrastructure provider for AWS]
+** https://github.com/rancher/cluster-api-provider-rke2[Bootstrap/Control Plane provider for RKE2]
+* **clusterctl** CLI - see https://cluster-api.sigs.k8s.io/user/quick-start#install-clusterctl[install clusterctl from CAPI book]
+
+AWS Kubeadm::
++
+* Rancher Manager cluster with {product_name} installed
+* Cluster API Providers: you can find a guide on how to install a provider using the `CAPIProvider` resource xref:../../tasks/capi-operator/basic_cluster_api_provider_installation.adoc[here]
+** https://github.com/kubernetes-sigs/cluster-api-provider-aws/[Infrastructure provider for AWS]
+** https://github.com/kubernetes-sigs/cluster-api[Bootstrap/Control Plane provider for Kubeadm]
+* **clusterctl** CLI - see https://cluster-api.sigs.k8s.io/user/quick-start#install-clusterctl[install clusterctl from CAPI book]
+
+Docker Kubeadm::
++
+* Rancher Manager cluster with {product_name} installed
+* Cluster API Providers: you can find a guide on how to install a provider using the `CAPIProvider` resource xref:../../tasks/capi-operator/basic_cluster_api_provider_installation.adoc[here]
+** https://github.com/kubernetes-sigs/cluster-api[Infrastructure provider for Docker]
+** https://github.com/kubernetes-sigs/cluster-api[Bootstrap/Control Plane provider for Kubeadm]
+* **clusterctl** CLI - see https://cluster-api.sigs.k8s.io/user/quick-start#install-clusterctl[install clusterctl from CAPI book]
+======
+
+== Create Your Cluster Definition
+
+[tabs]
+======
+AWS RKE2::
++
+Before creating an AWS+RKE2 workload cluster, it is required to build an AMI for the RKE2 version that is going to be installed on the cluster. You can follow the steps in the https://github.com/rancher/cluster-api-provider-rke2/tree/main/image-builder#aws[RKE2 image-builder README] to build the AMI.
++
+We recommend you refer to the CAPRKE2 repository where you can find a https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws[samples folder] with different CAPA+CAPRKE2 cluster configurations that can be used to provision downstream clusters. The https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/internal[internal folder] contains cluster templates to deploy an RKE2 cluster on AWS using the internal cloud provider, and the https://github.com/rancher/cluster-api-provider-rke2/tree/main/samples/aws/external[external folder] contains the cluster templates to deploy a cluster with the external cloud provider.
++
+We will use the `internal` one for this guide, however the same steps apply for `external`.
++
+To generate the YAML for the cluster, do the following:
++
+. Open a terminal and run the following:
++
+[source,bash]
+----
+export CLUSTER_NAME=cluster1
+export CONTROL_PLANE_MACHINE_COUNT=3
+export WORKER_MACHINE_COUNT=3
+export RKE2_VERSION=v1.30.3+rke2r1
+export AWS_NODE_MACHINE_TYPE=t3a.large
+export AWS_CONTROL_PLANE_MACHINE_TYPE=t3a.large
+export AWS_SSH_KEY_NAME="aws-ssh-key"
+export AWS_REGION="aws-region"
+export AWS_AMI_ID="ami-id"
+
+curl -s https://raw.githubusercontent.com/rancher/cluster-api-provider-rke2/refs/heads/main/examples/aws/cluster-template.yaml | envsubst > cluster1.yaml
+----
++
+. View **cluster1.yaml** and examine the resulting yaml file. You can make any changes you want as well.
++
+> The Cluster API quickstart guide contains more detail. Read the steps related to this section https://cluster-api.sigs.k8s.io/user/quick-start.html#required-configuration-for-common-providers[here].
+
+. Create the cluster using kubectl
++
+[source,bash]
+----
+kubectl create -f cluster1.yaml
+----
+
+AWS Kubeadm::
++
+To generate the YAML for the cluster, do the following:
++
+. Open a terminal and run the following:
++
+[source,bash]
+----
+export CLUSTER_NAME=cluster1
+export KUBERNETES_VERSION=v1.30
+export AWS_REGION=eu-west-2
+export AWS_INSTANCE_TYPE=t3.medium
+
+curl -s https://raw.githubusercontent.com/rancher-sandbox/rancher-turtles-fleet-example/templates/capa.yaml | envsubst > cluster1.yaml
+----
++
+. View **cluster1.yaml** to ensure there are no tokens (i.e. SSH keys or cloud credentials). You can make any changes you want as well.
++
+> The Cluster API quickstart guide contains more detail. Read the steps related to this section https://cluster-api.sigs.k8s.io/user/quick-start.html#required-configuration-for-common-providers[here].
+
+. Create the cluster using kubectl
++
+[source,bash]
+----
+ kubectl create -f cluster1.yaml
+----
+
+Docker Kubeadm::
++
+To generate the YAML for the cluster, do the following:
++
+. Open a terminal and run the following:
++
+[source,bash]
+----
+export CLUSTER_NAME=cluster1
+export CONTROL_PLANE_MACHINE_COUNT=1
+export WORKER_MACHINE_COUNT=1
+export KUBERNETES_VERSION=v1.30.0
+
+curl -s https://raw.githubusercontent.com/rancher-sandbox/rancher-turtles-fleet-example/templates/docker-kubeadm.yaml | envsubst > cluster1.yaml
+----
++
+. View **cluster1.yaml** to ensure there are no tokens. You can make any changes you want as well.
++
+> The Cluster API quickstart guide contains more detail. Read the steps related to this section https://cluster-api.sigs.k8s.io/user/quick-start.html#required-configuration-for-common-providers[here].
+
+. Create the cluster using kubectl
++
+[source,bash]
+----
+kubectl create -f cluster1.yaml
+----
+
+======
+
+[TIP]
+====
+After your cluster is provisioned, you can check functionality of the workload cluster using `kubectl`:
+
+[source,bash]
+----
+kubectl describe cluster cluster1
+----
+
+Remember that clusters are namespaced resources. These examples provision clusters in the `default` namespace, but you will need to provide yours if using a different one.
+====
+
+
+== Mark Namespace or Cluster for Auto-Import
+
+To automatically import a CAPI cluster into Rancher Manager, there are 2 options:
+
+. Label a namespace so all clusters contained in it are imported.
+. Label an individual cluster definition so that it's imported.
+
+Labeling a namespace:
+
+[source,bash]
+----
+kubectl label namespace default cluster-api.cattle.io/rancher-auto-import=true
+----
+
+Labeling an individual cluster definition:
+
+[source,bash]
+----
+kubectl label cluster.cluster.x-k8s.io -n default cluster1 cluster-api.cattle.io/rancher-auto-import=true
+----
diff --git a/docs/v0.15.0/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc b/docs/v0.15.0/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc
new file mode 100644
index 00000000..53e75871
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/reference-guides/rancher-turtles-chart/values.adoc
@@ -0,0 +1,73 @@
+= Chart configuration
+:sidebar_position: 0
+
+[TIP]
+====
+For the up-to-date content of `values.yaml` source file, refer to the https://github.com/rancher/turtles[{product_name} repository].
+====
+
+
+== {product_name} values
+
+When installing {product_name} using the official Helm chart, it is possible to configure a number of feature flags. This is a comprehensive list of the available values and their usage:
+
+[source,yaml]
+----
+rancherTurtles:
+ features:
+ cluster-api-operator:
+ cleanup: true # indicates that rancher turtles resources are cleaned up after uninstalling (default: true)
+ kubectlImage: registry.k8s.io/kubernetes/kubectl:v1.30.0 # indicates the image to use for post-delete cleanup (default: Kubernetes container image registry)
+ embedded-capi: # this is a rancher functionality that is not compatible with rancher-turtles
+ disabled: true # indicates that embedded-capi must be disabled during installation (default: true)
+ rancher-webhook: # an existing rancher installation keeps rancher webhooks after disabling embedded-capi
+ cleanup: true # indicates that the remaining rancher webhooks be removed (default: true)
+ kubectlImage: registry.k8s.io/kubernetes/kubectl:v1.30.0 # indicates the image to use for pre-install cleanup (default: Kubernetes container image registry)
+ rancher-kubeconfigs: # with capi 1.5.0 and greater, secrets for kubeconfigs must contain a specific label. See https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/book/src/developer/providers/migrations/v1.4-to-v1.5.md#other
+ label: true # indicates that the label will be added (default: true)
+ managementv3-cluster: # rancher will use `clusters.management.cattle.io` to represent an imported capi cluster
+ enabled: false # if false, indicates that `clusters.provisioning.cattle.io` resources will be used (default: false)
+----
+
+The list above has been truncated to show only the configurable feature flags. Other fields under the `rancherTurtles` key are set automatically when a chart is released.
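+
+For example, the following is a sketch of overriding one of these flags at install time (the flag path follows the listing above):
+
+[source,bash]
+----
+helm install rancher-turtles turtles/rancher-turtles \
+  -n rancher-turtles-system --create-namespace --dependency-update \
+  --set rancherTurtles.features.managementv3-cluster.enabled=true
+----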
+
+== Cluster API Operator values
+
+Any values passed to `helm` with the `cluster-api-operator` key will be passed along to the `Cluster API Operator` project.
+
+[TIP]
+====
+A full set of available values for the `Cluster API Operator` can be found in the operator https://github.com/kubernetes-sigs/cluster-api-operator/blob/main/hack/charts/cluster-api-operator/values.yaml[values.yaml].
+====
+
+
+Currently, this is the available set of values for the `cluster-api-operator` setup in the `rancher-turtles` chart:
+
+[source,yaml]
+----
+cluster-api-operator:
+ enabled: true # indicates if CAPI operator should be installed (default: true)
+ cluster-api:
+ enabled: true # indicates if core CAPI controllers should be installed (default: true)
+ configSecret:
+ name: "" # (provide only if using a user-managed secret) name of the config secret to use for core CAPI controllers, used by the CAPI operator. See https://github.com/kubernetes-sigs/cluster-api-operator/tree/main/docs#installing-azure-infrastructure-provider docs for more details.
+ defaultName: "capi-env-variables" # default name for the automatically created secret.
+ core:
+ namespace: capi-system
+ fetchConfig: # (only required for airgapped environments)
+ url: "" # url to fetch config from, used by the CAPI operator. See https://github.com/kubernetes-sigs/cluster-api-operator/tree/main/docs#provider-spec docs for more details.
+ selector: "" # selector to use for fetching config, used by the CAPI operator.
+ rke2:
+ enabled: true # indicates if RKE2 provider for Cluster API should be installed (default: true)
+ version: "" # version of Cluster API Provider RKE2 (CAPRKE2) to install
+ bootstrap: # CAPRKE2 Bootstrap Provider
+ namespace: rke2-bootstrap-system
+ fetchConfig: # (only required for airgapped environments)
+ url: "" # url to fetch config from, used by the CAPI operator. See https://github.com/kubernetes-sigs/cluster-api-operator/tree/main/docs#provider-spec docs for more details.
+ selector: "" # selector to use for fetching config, used by the CAPI operator.
+ controlPlane: # CAPRKE2 Control Plane Provider
+ namespace: rke2-control-plane-system
+ fetchConfig: # (only required for airgapped environments)
+ url: "" # url to fetch config from, used by the CAPI operator. See https://github.com/kubernetes-sigs/cluster-api-operator/tree/main/docs#provider-spec docs for more details.
+ selector: "" # selector to use for fetching config, used by the CAPI operator.
+----
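+
+As an illustration, the following is a sketch of passing one of these operator values through `helm`, for example to pin the CAPRKE2 version (the version value is a placeholder):
+
+[source,bash]
+----
+helm install rancher-turtles turtles/rancher-turtles \
+  -n rancher-turtles-system --create-namespace --dependency-update \
+  --set cluster-api-operator.cluster-api.rke2.version="<caprke2-version>"
+----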
diff --git a/docs/v0.15.0/modules/en/pages/reference-guides/test-suite/intro.adoc b/docs/v0.15.0/modules/en/pages/reference-guides/test-suite/intro.adoc
new file mode 100644
index 00000000..d23fdbd5
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/reference-guides/test-suite/intro.adoc
@@ -0,0 +1,11 @@
+= Introduction
+:sidebar_position: 1
+
+This section contains information on how you can leverage the existing E2E suite to integrate any CAPI provider with Turtles and verify that the provisioning and importing of clusters works as expected. The validation performs the following actions:
+
+* Create a management cluster in the desired environment.
+* Install Rancher and Turtles with all prerequisites.
+* Install Gitea.
+* Run the suite that will create a git repo, apply cluster template using Fleet and verify the cluster is created and successfully imported in Rancher.
+
+The test suite can be used for certification of providers not listed in the xref:../../reference-guides/providers/certified.adoc[Certification table], as detailed in xref:../../tasks/provider-certification/intro.adoc[Provider Certification].
diff --git a/docs/v0.15.0/modules/en/pages/reference-guides/test-suite/usage.adoc b/docs/v0.15.0/modules/en/pages/reference-guides/test-suite/usage.adoc
new file mode 100644
index 00000000..27f5a281
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/reference-guides/test-suite/usage.adoc
@@ -0,0 +1,106 @@
+= Test suite guide
+
+The main reference for reusing the test suite is https://github.com/rancher-sandbox/turtles-integration-suite-example[this repository], which contains an example on how to integrate a given CAPI provider with {product_name} and applies a series of checks based on a GitOps workflow.
+
+== Before execution
+
+The end-to-end test environment used in Turtles provides a number of configuration alternatives depending on the type of test you are running and the type of checks you are performing. If you are getting started with the test suite, we recommend keeping your configuration as simple as possible and limiting the number of customizations so you can understand the process and its configuration details. You can start your journey into provider testing by cloning the sample repository:
+
+----
+git clone https://github.com/rancher-sandbox/turtles-integration-suite-example.git
+----
+
+The simplest test execution you can run creates a local environment that does not use an internet-facing endpoint. This limits the checks to only local downstream clusters (effectively, CAPI clusters provisioned via CAPD), but it is enough to run the example integration. You can run this local version by specifying the isolated environment:
+
+----
+MANAGEMENT_CLUSTER_ENVIRONMENT="isolated-kind" make test
+----
+
+When checking the integration with other infrastructure providers (e.g. providers for cloud vendors), you will have to make your Rancher instance available via a public endpoint to the downstream clusters, which are no longer in your local environment. The `MANAGEMENT_CLUSTER_ENVIRONMENT` variable we used before supports the following values:
+
+----
+MANAGEMENT_CLUSTER_ENVIRONMENT: "kind" # supported options are eks, isolated-kind, kind
+----
+
+`isolated-kind`, which is the value we used for local testing, and `kind` will deploy equivalent local environments. The difference is that `kind` will also configure a publicly accessible endpoint via https://ngrok.com/[ngrok]. You can get a free (limited) `ngrok` endpoint and use it for executing tests. Before running `make test`, you will also need to set the following environment variables:
+
+----
+NGROK_API_KEY: ""
+NGROK_AUTHTOKEN: ""
+----
+
+Using this configuration, during environment creation, the Rancher instance will be configured to be accessible via your `ngrok` endpoint and downstream clusters will be able to communicate with it.
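+
+Putting it together, an internet-facing run might look like this (the token values are placeholders):
+
+----
+export NGROK_API_KEY="<your-ngrok-api-key>"
+export NGROK_AUTHTOKEN="<your-ngrok-authtoken>"
+
+MANAGEMENT_CLUSTER_ENVIRONMENT="kind" make test
+----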
+
+The <<_other_options,Other options>> section contains more information on what you can configure before execution.
+
+== Basic Workflow
+
+In previous sections we introduced the main actions performed in the sample test integration:
+
+[discrete]
+==== Create a management cluster in the desired environment.
+
+This is not a Turtles specific requirement as, when working with CAPI, there needs to be a management cluster that will be used to create resources that represent downstream clusters. This is the main part of the test environment and, depending on the environment variables passed to the test suite, it can either be hosted locally (using `kind`) or in the cloud (`eks`).
+
+[discrete]
+==== Install Rancher and Turtles with all prerequisites.
+
+Turtles is a Rancher extension and, as such, it needs a Rancher installation to be deployed. Rancher Manager will be run in the management cluster we created in the first step and the Turtles chart will be installed when Rancher is available. If using an internet-facing configuration, an ingress controller will make Rancher reachable from an outside network (e.g. cluster deployed in the cloud).
+
+[discrete]
+==== Run the suite that will create a git repo, apply cluster template using Fleet and verify the cluster is created and successfully imported in Rancher.
+
+The main test suite, and the one used as an example, is based on a GitOps flow and uses https://github.com/rancher/fleet[Fleet] as a GitOps orchestrator tool. Based on the cluster templates provided (you can check the ones that come with the example integration https://github.com/rancher-sandbox/turtles-integration-suite-example/tree/main/suites/data/cluster-templates[here]), it will create the CAPI clusters defined in the YAML files. Once these clusters are available, they will be configured to be xref:../../getting-started/create-first-cluster/using_fleet.adoc[imported into Rancher using Turtles], and the suite will verify that the downstream clusters are accessible via Rancher. It will also check that downstream clusters can be deleted and that they are then no longer available in Rancher.
+
+== Other options
+
+You can take a look at the `config.yaml` https://github.com/rancher-sandbox/turtles-integration-suite-example/blob/main/config/config.yaml[file] in the `turtles-integration-suite-example` repository, which contains a list of environment variables used during test environment deployment and test execution. The following is a truncated version of the above-mentioned YAML file:
+
+----
+...
+variables:
+ CLUSTERCTL_BINARY_PATH: ""
+ USE_EXISTING_CLUSTER: "false"
+ SKIP_RESOURCE_CLEANUP: "false"
+ ARTIFACTS_FOLDER: "_artifacts"
+ MANAGEMENT_CLUSTER_ENVIRONMENT: "kind" # supported options are eks, isolated-kind, kind
+ RANCHER_VERSION: "v2.8.1"
+ KUBERNETES_VERSION: "v1.28.6"
+ KUBERNETES_MANAGEMENT_VERSION: "v1.27.0"
+ KUBERNETES_MANAGEMENT_AWS_REGION: "eu-west-2"
+ RKE2_VERSION: "v1.28.1+rke2r1"
+ TURTLES_PATH: "turtles/rancher-turtles"
+ TURTLES_REPO_NAME: "turtles"
+ TURTLES_URL: https://rancher.github.io/turtles
+ TURTLES_VERSION: "v0.10.0"
+ RANCHER_HOSTNAME: "localhost"
+ RANCHER_FEATURES: ""
+ RANCHER_PATH: "rancher-latest/rancher"
+ RANCHER_REPO_NAME: "rancher-latest"
+ RANCHER_URL: "https://releases.rancher.com/server-charts/latest"
+ CERT_MANAGER_URL: "https://charts.jetstack.io"
+ CERT_MANAGER_REPO_NAME: "jetstack"
+ CERT_MANAGER_PATH: "jetstack/cert-manager"
+ ...
+ ...
+ ...
+ HELM_BINARY_PATH: "helm"
+ HELM_EXTRA_VALUES_FOLDER: "/tmp"
+ # Additional setup for establishing rancher ingress
+ NGROK_REPO_NAME: "ngrok"
+ NGROK_URL: "https://ngrok.github.io/kubernetes-ingress-controller"
+ NGROK_PATH: "ngrok/kubernetes-ingress-controller"
+ NGROK_API_KEY: ""
+ NGROK_AUTHTOKEN: ""
+ GITEA_REPO_NAME: "gitea-charts"
+ GITEA_REPO_URL: "https://dl.gitea.com/charts/"
+ GITEA_CHART_NAME: "gitea"
+ GITEA_CHART_VERSION: "9.4.0"
+ ...
+----
+
+[TIP]
+====
+You can refer to https://github.com/rancher/turtles/tree/main/test/e2e#e2e-tests[Turtles repository] to see all the suites and parameters you can use to customize test execution. We recommend doing this only if you are familiar with the deployment/configuration of the test environment and have specific integration requirements.
+====
+
diff --git a/docs/v0.15.0/modules/en/pages/reference/glossary.adoc b/docs/v0.15.0/modules/en/pages/reference/glossary.adoc
new file mode 100644
index 00000000..c2b85cc8
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/reference/glossary.adoc
@@ -0,0 +1,316 @@
+= Glossary
+:sidebar_position: 2
+
+== Table of Contents
+
+[cols=14*]
+|===
+| xref:./glossary.adoc#_a[A]
+| xref:./glossary.adoc#_b[B]
+| xref:./glossary.adoc#_c[C]
+| xref:./glossary.adoc#_f[F]
+| xref:./glossary.adoc#_i[I]
+| xref:./glossary.adoc#_k[K]
+| xref:./glossary.adoc#_m[M]
+| xref:./glossary.adoc#_n[N]
+| xref:./glossary.adoc#_o[O]
+| xref:./glossary.adoc#_p[P]
+| xref:./glossary.adoc#_r[R]
+| xref:./glossary.adoc#_s[S]
+| xref:./glossary.adoc#_t[T]
+| xref:./glossary.adoc#_w[W]
+|===
+
+== A
+
+=== Add-ons
+
+Services beyond the fundamental components required to deploy a Kubernetes-conformant cluster and categorized into two types:
+
+* *Core Add-ons*: Addons that are required to deploy a Kubernetes-conformant cluster: DNS, kube-proxy, CNI.
+* *Additional Add-ons*: Addons that are not required for a Kubernetes-conformant cluster (e.g. metrics/Heapster, Dashboard).
+
+=== Air-gapped environment
+
+Setting up and running Kubernetes clusters without direct access to the internet.
+
+== B
+
+=== Bootstrap
+
+The process of turning a server into a Kubernetes node. This may involve assembling data to provide when creating the server that backs the Machine, as well as runtime configuration of the software running on that server.
+
+=== Bootstrap cluster
+
+A temporary cluster that is used to provision a Target Management cluster.
+
+=== Bootstrap provider
+
+Refers to a <<_provider,provider>> that implements a solution for the <<_bootstrap,bootstrap>> process.
+
+== C
+
+=== CAPI
+
+Core Cluster API
+
+=== CAPA
+
+Cluster API Provider AWS
+
+=== CAPD
+
+Cluster API Provider Docker
+
+=== CAPG
+
+Cluster API Google Cloud Provider
+
+=== CAPIO
+
+Cluster API Operator
+
+=== CAPRKE2
+
+Cluster API Provider RKE2
+
+=== CAPV
+
+Cluster API Provider vSphere
+
+=== CAPZ
+
+Cluster API Provider Azure
+
+=== CAPI Provider
+
+A public API that facilitates provisioning and operations over the <<_cluster_api_operator,CAPI Operator>> and <<_cluster_api,Cluster API>> resources.
+
+=== Child Cluster
+
+Term commonly used interchangeably with the <<_workload_cluster,workload cluster>>.
+
+=== Cluster
+
+A full Kubernetes deployment. See Management Cluster and Workload Cluster.
+
+=== ClusterClass
+
+A collection of templates that define a topology (control plane and workers) to be used to continuously reconcile one or more Clusters.
+See xref:./../getting-started/cluster-class/create_cluster.adoc[ClusterClass]
+
+=== Cluster API
+
+Or *Cluster API project*
+
+The Cluster API is a sub-project of the SIG-cluster-lifecycle. It is also used to refer to the software components, APIs, and community that produce them.
+
+See <<_core_provider,core provider>>
+
+=== Cluster API Operator
+
+Or *Cluster API Operator project*
+
+The Cluster API Operator is a sub-project of the SIG-cluster-lifecycle. It is designed to empower cluster administrators to handle the lifecycle of Cluster API providers within a management cluster using a declarative approach.
+
+=== Cluster API Provider RKE2
+
+<<_caprke2,Cluster API Provider RKE2>> is a combination of two provider types: a Cluster API Control Plane Provider for provisioning Kubernetes control plane nodes and a Cluster API Bootstrap Provider for bootstrapping Kubernetes on a machine where <<_rke2,RKE2>> is used as the Kubernetes distro.
+
+=== Control plane
+
+The set of Kubernetes services that form the basis of a cluster. See also the https://kubernetes.io/docs/concepts/#kubernetes-control-plane[Kubernetes control plane] documentation. There are two variants:
+
+* *Self-provisioned*: A Kubernetes control plane consisting of pods or machines wholly managed by a single Cluster API deployment.
+* *External* or *Managed*: A control plane offered and controlled by some system other than Cluster API (e.g., GKE, AKS, EKS, IKS).
+
+=== Control plane provider
+
+Refers to a <<_provider,provider>> that implements a solution for the management of a Kubernetes <<_control_plane,control plane>>.
+
+See <<_caprke2,CAPRKE2>>, <<_kcp,KCP>>.
+
+=== Core provider
+
+Refers to a <<_provider,provider>> that implements Cluster API core controllers; if you consider that the first project that must be deployed in a management Cluster is Cluster API itself, it should be clear why the Cluster API project is also referred to as the core provider.
+
+See <<_cluster_api,CAPI>>.
+
+== F
+
+=== Fleet
+
+A container management and deployment engine designed to offer users more control on the local cluster and constant monitoring through GitOps. Take a look at https://fleet.rancher.io/[fleet documentation] to know more about Fleet.
+
+== I
+
+=== Infrastructure provider
+
+Refers to a <<_provider,provider>> that implements provisioning of infrastructure/computational resources required by
+the Cluster or by Machines (e.g. VMs, networking, etc.).
+Cloud infrastructure providers include AWS, Azure, and Google, while VMware, MAAS, and metal3.io can be defined as bare metal providers.
+
+=== IPAM provider
+
+Refers to a <<_provider,provider>> that allows Cluster API to interact with IPAM solutions.
+IPAM provider's interaction with Cluster API is based on the `IPAddressClaim` and `IPAddress` API types.
+
+== K
+
+=== Kubernetes-conformant
+
+Or *Kubernetes-compliant*
+
+A cluster that passes the Kubernetes conformance tests.
+
+=== Kubernetes Operator
+
+A Kubernetes Operator is a method of packaging, deploying, and managing a Kubernetes application. See also https://kubernetes.io/docs/concepts/extend-kubernetes/operator/ for more information.
+
+=== k/k
+
+Refers to the https://github.com/kubernetes/kubernetes[main Kubernetes git repository] or the main Kubernetes project.
+
+=== KCP
+
+Kubeadm Control plane Provider
+
+== M
+
+=== Machine
+
+Or *Machine Resource*
+
+The Custom Resource for Kubernetes that represents an infrastructure component that hosts a Kubernetes node.
+
+=== Manage a cluster
+
+Perform create, scale, upgrade, or destroy operations on the cluster.
+
+=== Managed Kubernetes
+
+Managed Kubernetes refers to any Kubernetes cluster provisioning and maintenance abstraction, usually exposed as an API, that is natively available in a Cloud provider. For example: https://aws.amazon.com/eks/[EKS], https://www.oracle.com/cloud/cloud-native/container-engine-kubernetes/[OKE], https://azure.microsoft.com/en-us/products/kubernetes-service[AKS], https://cloud.google.com/kubernetes-engine[GKE], https://www.ibm.com/cloud/kubernetes-service[IBM Cloud Kubernetes Service], https://www.digitalocean.com/products/kubernetes[DOKS], and many more throughout the Kubernetes Cloud Native ecosystem.
+
+=== Managed Topology
+
+See <<_topology,Topology>>
+
+=== Management cluster
+
+The cluster where one or more Infrastructure Providers run, and where resources (e.g. Machines) are stored. Typically referred to when you are provisioning multiple workload clusters.
+
+== N
+
+=== Node pools
+
+A node pool is a group of nodes within a cluster that all have the same configuration.
+
+== O
+
+=== Operating system
+
+Or *OS*
+
+A generically understood combination of a kernel and system-level userspace interface, such as Linux or Windows, as opposed to a particular distribution.
+
+== P
+
+=== Pivot
+
+Pivot is a process for moving the provider components and declared cluster-api resources from a Source Management cluster to a Target Management cluster.
+
+The pivot process is also used for deleting a management cluster and could also be used during an upgrade of the management cluster.
+
+=== Provider
+
+Or *Cluster API provider*
+
+This term was originally used as an abbreviation for <<_infrastructure_provider,Infrastructure provider>>, but currently it is used
+to refer to any project that can be deployed and provides functionality to the Cluster API management Cluster.
+
+See <<_bootstrap_provider,Bootstrap provider>>, <<_control_plane_provider,Control plane provider>>, <<_core_provider,Core provider>>,
+<<_infrastructure_provider,Infrastructure provider>>, <<_ipam_provider,IPAM provider>>, <<_runtime_extension_provider,Runtime extension provider>>.
+
+=== Provider components
+
+Refers to the YAML artifact published as part of the release process for <<_provider,providers>>;
+it usually includes Custom Resource Definitions (CRDs), Deployments (to run the controller manager), RBAC, etc.
+
+In some cases, the same expression is used to refer to the instances of above components deployed in a management cluster.
+
+See <<_provider_repository,Provider repository>>
+
+=== Provider repository
+
+Refers to the location where the YAML for <<_provider_components,provider components>> is hosted; usually a provider repository hosts
+many versions of provider components, one for each released version.
+
+== R
+
+=== Rancher
+
+An open-source https://www.rancher.com/[platform] designed to simplify the deployment and management of Kubernetes clusters.
+
+=== Rancher Cluster Agent
+
+A component deployed by Rancher in each Kubernetes cluster it manages. Its primary role is to establish a secure communication channel between the Rancher server and the Kubernetes cluster, enabling Rancher to manage and interact with the cluster.
+
+=== Rancher Manager
+
+The Rancher Manager (or Rancher Server) is where the Rancher UI and API are hosted, and it communicates with managed clusters through components like the <<_rancher_cluster_agent,Rancher Cluster Agent>>. It allows users to manage their Kubernetes clusters, applications, and Rancher-specific resources such as Catalogs, Users, Global Roles, and more.
+
+=== RKE2
+
+Rancher's next-generation, fully conformant Kubernetes distribution that focuses on security and compliance within the U.S. Federal Government sector. See https://docs.rke2.io/[documentation] for more details.
+
+=== Runtime Extension
+
+An external component which is part of a system built on top of Cluster API that can handle requests for a specific Runtime Hook.
+
+=== Runtime Extension provider
+
+Refers to a <<_provider,provider>> that implements one or more <<_runtime_extension,runtime extensions>>.
+
+== S
+
+=== Scaling
+
+Unless otherwise specified, this refers to horizontal scaling.
+
+=== Stacked control plane
+
+A control plane node where etcd is colocated with the Kubernetes API server and runs as a static pod.
+
+=== Server
+
+The infrastructure that backs a <<_machine,Machine Resource>>, typically either a cloud instance, virtual machine, or physical host.
+
+=== {product_name}
+
+A <<_kubernetes_operator,Kubernetes operator>> that provides integration between Rancher Manager and Cluster API (CAPI) with the aim of bringing full CAPI support to Rancher.
+
+== T
+
+=== Topology
+
+A field in the Cluster object spec that allows defining and managing the shape of the Cluster's control plane and worker machines from a single point of control. The Cluster's topology is based on a <<_clusterclass,ClusterClass>>.
+Sometimes it is also referred to as a managed topology.
+
+See <<_clusterclass,ClusterClass>>
+
+=== Turtles
+
+Refers to <<_suse_rancher_prime_cluster_api,{product_name}>>
+
+== W
+
+=== Workload Cluster
+
+A cluster created by a ClusterAPI controller, which is _not_ a bootstrap cluster, and is meant to be used by end-users, as opposed to by CAPI tooling.
+
+=== WorkerClass
+
+A collection of templates that define a set of worker nodes in the cluster. A ClusterClass contains zero or more WorkerClass definitions.
+
+See <<_clusterclass,ClusterClass>>
diff --git a/docs/v0.15.0/modules/en/pages/reference/intro.adoc b/docs/v0.15.0/modules/en/pages/reference/intro.adoc
new file mode 100644
index 00000000..53779ece
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/reference/intro.adoc
@@ -0,0 +1,4 @@
+= Introduction
+:sidebar_position: 0
+
+In this section, we will explore references in {product_name}.
diff --git a/docs/v0.15.0/modules/en/pages/security/slsa.adoc b/docs/v0.15.0/modules/en/pages/security/slsa.adoc
new file mode 100644
index 00000000..dae5fe94
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/security/slsa.adoc
@@ -0,0 +1,56 @@
+= SLSA
+:sidebar_position: 1
+
+== Overview
+
+https://slsa.dev/spec/v1.0/about[SLSA] is a set of incrementally adoptable guidelines for supply chain security, established by industry consensus. The specification set by SLSA is useful for both software producers and consumers: producers can follow SLSA's guidelines to make their software supply chain more secure, and consumers can use SLSA to make decisions about whether to trust a software package.
+
+{product_name} meets https://slsa.dev/spec/v1.0/levels[SLSA Level 3] requirements.
+
+|===
+| Requirement | Required at SLSA L3 | Met by {product_name}
+
+| Choose an appropriate build platform
+| Yes
+| Yes
+
+| Follow a consistent build process
+| Yes
+| Yes
+
+| Distribute provenance
+| Yes
+| Yes
+|===
+
+== Build Platform
+
+* The {product_name} project uses Git for source code management.
+* All {product_name} maintainers are required to have two-factor authentication enabled and to sign and sign off on all their contributions.
+* The {product_name} project uses GitHub Actions and GitHub Runners for building all its release artifacts.
+* The build and release process runs in isolation on an ephemeral environment provided by GitHub-hosted runners.
+
+== Build Process
+
+* The build and release process is defined in code and is kept under version control.
+* The GitHub Workflows make use of GitHub Actions pinned to certain versions and are kept up-to-date using GitHub Dependabot.
+* All changes to the build and release process are done via Pull Requests that must be approved by at least one {product_name} maintainer.
+* The release process can only be kicked off by a {product_name} maintainer by pushing a Git tag in the semver format.
+
+== Provenance
+
+* The {product_name} project uses the official https://github.com/slsa-framework/slsa-github-generator[SLSA GitHub Generator] project for provenance generation and distribution.
+* The provenance for the release artifacts published to GitHub Container Registry and to Rancher Prime Registry is generated using the generator_container_slsa3 GitHub Workflow provided by the https://github.com/slsa-framework/slsa-github-generator[SLSA GitHub Generator] project.
+* The provenance identifies the {product_name} container images using their digest in SHA-256 format.
+* The provenance is signed by Sigstore Cosign using the GitHub OIDC identity, and the public key to verify the provenance is stored in the public https://docs.sigstore.dev/logging/overview/[Rekor transparency log].
+* The release process and the provenance generation are run in isolation on an ephemeral environment provided by GitHub-hosted runners.
+* The provenance of the {product_name} container images can be verified using the official https://github.com/slsa-framework/slsa-verifier[SLSA verifier tool]; see the example after this list.
+* The provenance generation workflows run on ephemeral and isolated virtual machines, which are fully managed by GitHub.
+* The provenance signing secrets are ephemeral and are generated through Sigstore's https://github.com/sigstore/cosign/blob/main/KEYLESS.md[keyless] signing procedure.
+* The https://github.com/slsa-framework/slsa-github-generator[SLSA GitHub Generator] runs on separate virtual machines from the build and release process, so that the {product_name} build scripts don't have access to the signing secrets.
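+
+As a hedged example (the image reference, digest, and tag below are placeholders rather than the published artifact names), provenance verification with the SLSA verifier looks roughly like this:
+
+[source,bash]
+----
+# Verify the SLSA provenance of a Turtles container image against the source repository
+slsa-verifier verify-image <registry>/<image>@sha256:<digest> \
+  --source-uri github.com/rancher/turtles \
+  --source-tag v0.15.0
+----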
+
+== Isolation
+
+* The release process and the provenance generation are run in isolation on an ephemeral environment provided by GitHub-hosted runners.
+* The provenance generation is decoupled from the build process; the https://github.com/slsa-framework/slsa-github-generator[SLSA GitHub Generator] runs on separate virtual machines fully managed by GitHub.
+* The release process can't access the provenance signing key because the provenance generator runs in isolation on separate GitHub-hosted runners.
diff --git a/docs/v0.15.0/modules/en/pages/tasks/capi-operator/add_infrastructure_provider.adoc b/docs/v0.15.0/modules/en/pages/tasks/capi-operator/add_infrastructure_provider.adoc
new file mode 100644
index 00000000..1940ce4f
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/tasks/capi-operator/add_infrastructure_provider.adoc
@@ -0,0 +1,53 @@
+= Installing AWS Infrastructure Provider using CAPIProvider resource
+
+This section describes how to install the AWS `InfrastructureProvider` via `CAPIProvider`, which is responsible for managing Cluster API AWS CRDs and the Cluster API AWS controller.
+
+[NOTE]
+====
+The detailed configuration steps for installing the raw AWS `InfrastructureProvider` are described in the https://cluster-api-operator.sigs.k8s.io/03_topics/03_basic-cluster-api-provider-installation/02_installing-capz#installing-azure-infrastructure-provider[official] CAPI Operator documentation.
+====
+
+
+_Example:_
+
+[source,yaml]
+----
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: aws-variables
+ namespace: capa-system
+type: Opaque
+stringData:
+ AWS_B64ENCODED_CREDENTIALS: ZZ99ii==
+ ExternalResourceGC: "true"
+---
+apiVersion: turtles-capi.cattle.io/v1alpha1
+kind: CAPIProvider
+metadata:
+ name: aws
+ namespace: capa-system
+spec:
+ name: aws
+ type: infrastructure # required
+ version: v2.6.1
+ configSecret:
+ name: aws-variables # This will additionally populate the default set of feature gates for the provider inside the secret
+ variables:
+ EXP_MACHINE_POOL: "true"
+ EXP_EXTERNAL_RESOURCE_GC: "true"
+ CAPA_LOGLEVEL: "4"
+ manager:
+ syncPeriod: "5m"
+----
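+
+Assuming the manifest above is saved as `aws-provider.yaml` (a hypothetical filename), it can be applied and the resulting provider inspected as follows:
+
+[source,bash]
+----
+kubectl apply -f aws-provider.yaml
+# Check the state of the CAPIProvider resource in the capa-system namespace
+kubectl get capiproviders.turtles-capi.cattle.io -n capa-system aws
+----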
+
+== Deleting providers
+
+To remove the installed providers and all related Kubernetes objects, delete the following custom resources:
+
+[source,bash]
+----
+kubectl delete coreprovider cluster-api
+kubectl delete infrastructureprovider aws
+----
diff --git a/docs/v0.15.0/modules/en/pages/tasks/capi-operator/basic_cluster_api_provider_installation.adoc b/docs/v0.15.0/modules/en/pages/tasks/capi-operator/basic_cluster_api_provider_installation.adoc
new file mode 100644
index 00000000..711c93a7
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/tasks/capi-operator/basic_cluster_api_provider_installation.adoc
@@ -0,0 +1,4 @@
+= Basic Cluster API Provider Installation
+:sidebar_position: 1
+
+This section describes the basic process of installing CAPI providers using the operator, together with a basic configuration of the AWS infrastructure provider. For further details, please refer to the corresponding https://cluster-api-operator.sigs.k8s.io/03_topics/03_basic-cluster-api-provider-installation/#basic-cluster-api-provider-installation[section] of the official CAPI Operator documentation.
diff --git a/docs/v0.15.0/modules/en/pages/tasks/capi-operator/capiprovider_resource.adoc b/docs/v0.15.0/modules/en/pages/tasks/capi-operator/capiprovider_resource.adoc
new file mode 100644
index 00000000..d067fc7e
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/tasks/capi-operator/capiprovider_resource.adoc
@@ -0,0 +1,63 @@
+= CAPIProvider Resource
+:sidebar_position: 2
+
+The `CAPIProvider` resource allows managing Cluster API Operator manifests in a declarative way. It is used to provision and configure Cluster API providers like AWS, vSphere etc.
+
+`CAPIProvider` follows a GitOps model - the spec fields are declarative user inputs. The controller only updates status.
+
+Every field provided by the upstream CAPI Operator resource for the desired `spec.type` is also available in the spec of the `CAPIProvider` resource. Feel free to refer to the upstream configuration https://cluster-api-operator.sigs.k8s.io/03_topics/02_configuration/[guides] for advanced scenarios.
+
+https://github.com/rancher/turtles/blob/main/docs/adr/0007-rancher-turtles-public-api.md[ADR]
+
+== Usage
+
+To use the `CAPIProvider` resource:
+
+. Create a `CAPIProvider` resource with the desired provider name, type, credentials, configuration, and features.
+. The `CAPIProvider` controller will handle templating the required Cluster API Operator manifests based on the `CAPIProvider` spec.
+. The status field on the `CAPIProvider` resource will reflect the state of the generated manifests.
+. Manage the `CAPIProvider` object declaratively to apply changes to the generated provider manifests.
+
+Here is an example `CAPIProvider` manifest:
+
+[source,yaml]
+----
+apiVersion: turtles-capi.cattle.io/v1alpha1
+kind: CAPIProvider
+metadata:
+ name: aws-infra
+ namespace: default
+spec:
+ name: aws
+ type: infrastructure
+ credentials:
+ rancherCloudCredential: aws-creds # Rancher credentials secret for AWS
+ configSecret:
+ name: aws-config
+ features:
+ clusterResourceSet: true
+----
+
+This will generate an AWS infrastructure provider with the supplied Rancher credential secret mapping and the custom features enabled.
+
+The `CAPIProvider` controller will own all the generated provider resources, enabling garbage collection when the `CAPIProvider` object is deleted.
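+
+As an illustrative check (using the names from the example above), the provider state reported by the controller can be inspected via the status field:
+
+[source,bash]
+----
+kubectl get capiproviders.turtles-capi.cattle.io -n default aws-infra -o jsonpath='{.status}'
+----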
+
+== Specification
+
+The key fields in the `CAPIProvider` spec are:
+
+* `name` - Name of the provider (aws, vsphere, etc.). Inherited from `metadata.name` if not specified.
+* `type` - Kind of provider resource (infrastructure, controlplane, etc.)
+* `credentials` - Source credentials for the provider specification
+* `configSecret` - Name of the provider config secret, where the variables and synced credentials are stored. If not specified, it defaults to the name of the `CAPIProvider` resource.
+* `features` - Enabled provider features
+* `variables` - A map of environment variables to add to the content of the `configSecret`
+
+Full documentation on the `CAPIProvider` resource is available https://doc.crds.dev/github.com/rancher/turtles/turtles-capi.cattle.io/CAPIProvider/v1alpha1@v0.5.0[here].
+
+== Deletion
+
+When a `CAPIProvider` resource is deleted, the Kubernetes garbage collection will clean up all the generated provider resources that it owns. This includes:
+
+* Cluster API Operator resource instance
+* Secret referenced by the `configSecret`
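+
+For example, deleting the provider from the usage section above (names follow that example) triggers the cleanup described here:
+
+[source,bash]
+----
+kubectl delete capiproviders.turtles-capi.cattle.io -n default aws-infra
+----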
diff --git a/docs/v0.15.0/modules/en/pages/tasks/capi-operator/clusterctlconfig_resource.adoc b/docs/v0.15.0/modules/en/pages/tasks/capi-operator/clusterctlconfig_resource.adoc
new file mode 100644
index 00000000..926d49e7
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/tasks/capi-operator/clusterctlconfig_resource.adoc
@@ -0,0 +1,46 @@
+= ClusterctlConfig Resource
+
+The `ClusterctlConfig` resource allows managing overrides for clusterctl (CAPI Operator) configurations in a declarative way. It is used to configure clusterctl providers and their URLs, as well as version restrictions.
+
+`ClusterctlConfig` follows a GitOps model - the spec fields are declarative user inputs. Turtles does not create or update the resource; it is up to the user to specify provider URL overrides and maintain its state. It takes precedence over the embedded defaults or the default `clusterctl` set of provider definitions.
+
+https://github.com/rancher/turtles/blob/main/docs/adr/0012-clusterctl-provider.md[ADR]
+
+== Usage
+
+To use the `ClusterctlConfig` resource:
+
+. Create a `ClusterctlConfig` resource named `clusterctl-config` in the namespace where Turtles is installed (`rancher-turtles-system` in the example below).
+. The `ClusterctlConfig` controller will update the `ConfigMap` mounted into the `cluster-api-operator` with the required clusterctl configuration, based on the `ClusterctlConfig` spec.
+. Manage the `ClusterctlConfig` object declaratively to apply changes to the generated provider configurations. It may take some time for changes to take effect, as the `kubelet` takes care of updating the mounted `ConfigMap` contents.
+
+Here is an example `ClusterctlConfig` manifest:
+
+[,yaml]
+----
+apiVersion: turtles-capi.cattle.io/v1alpha1
+kind: ClusterctlConfig
+metadata:
+ name: clusterctl-config
+ namespace: rancher-turtles-system
+spec:
+ providers:
+ - name: metal3
+ url: https://github.com/metal3-io/cluster-api-provider-metal3/releases/v1.7.1/infrastructure-components.yaml
+ type: InfrastructureProvider
+----
+
+This example will generate a clusterctl configuration for the `metal3` provider with the specified URL and type.
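+
+As a simple illustrative check (using the resource name and namespace from the example above), the applied overrides can be inspected directly:
+
+[source,bash]
+----
+kubectl get clusterctlconfigs.turtles-capi.cattle.io -n rancher-turtles-system clusterctl-config -o yaml
+----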
+
+== Specification
+
+The key fields in the `ClusterctlConfig` spec are:
+
+* `providers[].name` - Name of the provider (e.g. metal3)
+* `providers[].url` - URL of the provider configuration (e.g. https://github.com/metal3-io/cluster-api-provider-metal3/releases/v1.7.1/infrastructure-components.yaml). This can point to the `latest` release, if supported, or pin the maximum version, for example to `v1.7.1`.
+* `providers[].type` - Type of the provider (e.g. InfrastructureProvider)
+
+== Deletion
+
+When a `ClusterctlConfig` resource is deleted, the `ConfigMap` is reverted to its original state, managed by Turtles.
diff --git a/docs/v0.15.0/modules/en/pages/tasks/capi-operator/installing_core_provider.adoc b/docs/v0.15.0/modules/en/pages/tasks/capi-operator/installing_core_provider.adoc
new file mode 100644
index 00000000..f9549f9d
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/tasks/capi-operator/installing_core_provider.adoc
@@ -0,0 +1,26 @@
+= Installing the CoreProvider using CAPIProvider resource
+:sidebar_position: 4
+
+This section describes how to install the `CoreProvider` via `CAPIProvider`, which is responsible for managing the Cluster API CRDs and the Cluster API controller.
+
+[NOTE]
+====
+Please refer to the CoreProvider installation https://cluster-api-operator.sigs.k8s.io/03_topics/03_basic-cluster-api-provider-installation/01_installing-core-provider#installing-the-coreprovider[section] in the CAPI Operator docs for additional details on installing the raw `CoreProvider` resource.
+
+Only one CoreProvider can be installed at the same time on a single cluster.
+====
+
+
+_Example:_
+
+[source,yaml]
+----
+apiVersion: turtles-capi.cattle.io/v1alpha1
+kind: CAPIProvider
+metadata:
+ name: cluster-api
+ namespace: capi-system
+spec:
+ version: v1.7.7
+ type: core # required
+----
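+
+Assuming the manifest is saved as `coreprovider.yaml` (a hypothetical filename), it can be applied and its state inspected like this:
+
+[source,bash]
+----
+kubectl apply -f coreprovider.yaml
+# Verify the core provider resource in the capi-system namespace
+kubectl get capiproviders.turtles-capi.cattle.io -n capi-system cluster-api
+----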
diff --git a/docs/v0.15.0/modules/en/pages/tasks/intro.adoc b/docs/v0.15.0/modules/en/pages/tasks/intro.adoc
new file mode 100644
index 00000000..0dc0d195
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/tasks/intro.adoc
@@ -0,0 +1,4 @@
+= Introduction
+:sidebar_position: 1
+
+In this section, we cover additional operational tasks.
diff --git a/docs/v0.15.0/modules/en/pages/tasks/maintenance/automigrate_to_v3_import.adoc b/docs/v0.15.0/modules/en/pages/tasks/maintenance/automigrate_to_v3_import.adoc
new file mode 100644
index 00000000..faa948a6
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/tasks/maintenance/automigrate_to_v3_import.adoc
@@ -0,0 +1,18 @@
+= Auto-migration to v3 cluster import
+:sidebar_position: 3
+
+[CAUTION]
+====
+Please read the context described in xref:./import_controller_upgrade.adoc#_context[import controller upgrade] and consider how you want to perform your migration. The feature described here will automatically migrate all old clusters in one go, replacing them with v3 cluster objects.
+====
+
+
+== Context
+
+The goal of this feature is to enable an automatic migration path for users still using the v1 cluster import functionality or those currently migrating to v3 clusters. To achieve this, Turtles provides a separate feature flag named `managementv3-cluster-migration`. This option is *disabled* by default, but can be enabled during installation by selecting both checkboxes in the list:
+
+image::image.png[automigrate feature]
+
+== Functionality
+
+The intended behavior for this feature flag is described in the https://github.com/rancher/turtles/blob/main/docs/adr/0011-v1-to-v3-migration.md[ADR]. In summary, when enabled, this flag ensures that cluster imports are not duplicated during an upgrade to `management/v3` or a downgrade from `management/v3` back to `provisioning/v1`, and that previously imported CAPI cluster definitions are automatically re-imported.
diff --git a/docs/v0.15.0/modules/en/pages/tasks/maintenance/early_adopter_upgrade.adoc b/docs/v0.15.0/modules/en/pages/tasks/maintenance/early_adopter_upgrade.adoc
new file mode 100644
index 00000000..9ea99282
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/tasks/maintenance/early_adopter_upgrade.adoc
@@ -0,0 +1,92 @@
+= Upgrade Instructions for Early Adopters
+
+If you were part of the early adopter programme and using {product_name} v0.5.x or older, then you need to follow these instructions to use the GA version.
+
+[NOTE]
+====
+The early adopter programme was for non-production use. If you were using Turtles in production, please contact Rancher support before upgrading.
+====
+
+
+== Overview
+
+To upgrade to the GA version of {product_name}, you will need to do the following:
+
+. Delete any CAPI child cluster definitions
+. Helm uninstall {product_name}
+. Clean-up orphaned resources
+. Install the GA version
+
+== Steps
+
+[WARNING]
+====
+These steps are destructive to child clusters created using CAPI. If you have any concerns, contact Rancher support before proceeding.
+====
+
+
+. Delete any CAPI child cluster definitions and wait for CAPI to fully delete the child clusters
+. Delete the CAPI providers installed:
++
+[source,bash]
+----
+kubectl delete capiproviders.turtles-capi.cattle.io -n capi-kubeadm-control-plane-system kubeadm-control-plane
+kubectl delete capiproviders.turtles-capi.cattle.io -n capi-kubeadm-bootstrap-system kubeadm-bootstrap
+kubectl delete capiproviders.turtles-capi.cattle.io -n capi-system cluster-api
+kubectl delete ns capi-kubeadm-control-plane-system
+kubectl delete ns capi-kubeadm-bootstrap-system
+kubectl delete ns capi-system
+----
++
+. Run the following to uninstall the Turtles extension:
++
+[source,bash]
+----
+helm uninstall -n rancher-turtles-system rancher-turtles --cascade foreground --wait
+kubectl delete ns rancher-turtles-system
+----
++
+. Run the following to delete any orphaned resources:
++
+[source,bash]
+----
+kubectl delete deployments.apps/capi-controller-manager -n capi-system --ignore-not-found=true
+kubectl delete deployments.apps/capi-kubeadm-bootstrap-controller-manager -n capi-kubeadm-bootstrap-system --ignore-not-found=true
+kubectl delete deployments.apps/capi-kubeadm-control-plane-controller-manager -n capi-kubeadm-control-plane-system --ignore-not-found=true
+kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io capi-validating-webhook-configuration capi-kubeadm-bootstrap-validating-webhook-configuration capi-kubeadm-control-plane-validating-webhook-configuration --ignore-not-found=true
+kubectl delete mutatingwebhookconfigurations.admissionregistration.k8s.io capi-mutating-webhook-configuration capi-kubeadm-bootstrap-mutating-webhook-configuration capi-kubeadm-control-plane-mutating-webhook-configuration --ignore-not-found=true
+----
++
+[TIP]
+====
+If you are not going to continue using the extension, skip the next step and instead go to the <<_re_enable_embedded_capi,re-enable embedded CAPI>> section.
+====
++
+. Follow the instructions to install the new version of the extension xref:../../getting-started/install-rancher-turtles/using_rancher_dashboard.adoc[here].
+
+== Re-enable Embedded CAPI
+
+[NOTE]
+====
+This step is only required if you are not going to use the {product_name} extension any further.
+====
+
+
+. Create a `feature.yaml` file with `embedded-cluster-api` set to `true`:
++
+[source,yaml]
+----
+apiVersion: management.cattle.io/v3
+kind: Feature
+metadata:
+ name: embedded-cluster-api
+spec:
+ value: true
+----
++
+. Use `kubectl` to apply the `feature.yaml` file to the cluster:
++
+[source,bash]
+----
+kubectl apply -f feature.yaml
+----
diff --git a/docs/v0.15.0/modules/en/pages/tasks/maintenance/import_controller_upgrade.adoc b/docs/v0.15.0/modules/en/pages/tasks/maintenance/import_controller_upgrade.adoc
new file mode 100644
index 00000000..5bedfa20
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/tasks/maintenance/import_controller_upgrade.adoc
@@ -0,0 +1,132 @@
+= Upgrade Turtles import controller
+:sidebar_position: 2
+
+== Context
+
+When Turtles imports a CAPI cluster into Rancher, a number of Kubernetes resources must be created to represent the CAPI cluster in Rancher. The main resource is the one representing the CAPI cluster as a Rancher cluster, so it can be viewed and managed via Rancher:
+
+----
+clusters.provisioning.cattle.io
+----
+
+We also call this resource a `v1` cluster. If you were using Turtles before `v0.9.0`, the default import controller was based on generating this resource for every CAPI cluster that is imported into Rancher. The controller we will be migrating to was previously disabled behind the `managementv3_cluster` feature gate.
+
+Starting with Turtles `v0.9.0`, the `v3` import controller is no longer behind a feature gate and represents the default import logic. This means we are moving to a different strategy for importing clusters, and switching to creating a different type of Rancher cluster resource:
+
+----
+clusters.management.cattle.io
+----
+
+Which you may also see referred to as `v3` cluster.
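+
+As an illustrative way of seeing both representations on the Rancher management cluster (resource names will differ in your environment), the two resource types can be listed side by side:
+
+[source,bash]
+----
+# Rancher "v1" cluster resources, generated per imported CAPI cluster
+kubectl get clusters.provisioning.cattle.io -A
+# Rancher "v3" cluster resources, provisioned by Rancher itself
+kubectl get clusters.management.cattle.io
+----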
+
+Migrating from one controller to the other should be transparent but you will need to prepare your already imported clusters for migration to prevent the new controller from duplicating resources. To make the migration procedure even easier, you can use the provided <<_using_the_migration_script_per_namespace,migration script>> which will let you specify the namespace where your imported clusters exist and will migrate them automatically. If you prefer, you can <<_manually_editing_resources_per_cluster,apply changes to resources manually>> per cluster via kubectl.
+
+[NOTE]
+====
+Both cluster resources previously described are required to represent any Rancher cluster. This controller update only means that Turtles will create one of them and Rancher will take care of provisioning the other.
+====
+
+
+== Using the migration script - per namespace
+
+This method provides an automated process to migrate clusters in batches per namespace. To prepare your imported CAPI clusters for migration via the https://github.com/rancher/turtles/tree/main/scripts/import-controller-migration.sh[migration script], you need to follow the steps below:
+
+. Select the namespace where your imported clusters are deployed.
+. Pass the namespace as an argument to the migration script: this will prepare *ALL* CAPI clusters in that namespace for migration. If you want to migrate each cluster independently, we recommend following the <<_manually_editing_resources_per_cluster,manual procedure>>.
++
+[source,bash]
+----
+./import-controller-migration.sh <namespace>
+----
+
+. You can pass multiple namespaces as arguments if needed.
+. Before proceeding, you will be prompted for confirmation on the namespaces you provided.
+. For each namespace, it automatically fetches `v1` CAPI clusters that are not already migrated (an annotation is set on the resource after it is successfully prepared for migration).
+ ** `v1` clusters include a reference to `v3` clusters via the `status.clusterName` field.
+ ** Labels `cluster-api.cattle.io/capi-cluster-owner` and `cluster-api.cattle.io/capi-cluster-owner-ns` are added to the `v3` cluster.
+ ** After the labels are successfully added to the `v3` cluster resource, the `v1` cluster is annotated as migrated via `cluster-api.cattle.io/migrated=true`.
+
+== Manually editing resources - per cluster
+
+[CAUTION]
+====
+Note that manually editing Kubernetes resources may cause unexpected failures due to wrong or missing values. If you are not confident with `kubectl` and interacting with the Kubernetes API and its resources, we recommend opting for the <<_using_the_migration_script_per_namespace,scripted migration>>.
+====
+
+
+The requirement for migrating imported clusters is setting the labels used by the new controller on the `clusters.management.cattle.io` (v3) cluster resource. It is important to mark `v1` clusters as migrated after adding the labels, as this is the method that prevents the new controller from duplicating Rancher cluster resources.
+
+. Select the cluster(s) you would like to prepare for migration. We'll use `cluster1` as an example for this guide.
+ ** We can list the CAPI cluster resource `clusters.cluster.x-k8s.io`. We need to provide the corresponding namespace as this is a namespaced resource.
++
+[source,bash]
+----
+ kubectl get clusters.cluster.x-k8s.io -n capi-ns
+----
++
+In our example we can see `cluster1`:
++
+[source,bash]
+----
+ NAMESPACE NAME PHASE AGE VERSION
+ capi-ns cluster1 Provisioned 6d1h
+----
+
+ ** This CAPI cluster, when imported to Rancher, becomes linked to a Rancher cluster resource, represented by both `clusters.provisioning.cattle.io` and `clusters.management.cattle.io`. For now we're interested in `clusters.provisioning.cattle.io` or `v1` cluster.
++
+[source,bash]
+----
+ kubectl get clusters.provisioning.cattle.io -n capi-ns
+----
++
+And we get the Rancher cluster that Turtles generated to represent the CAPI cluster. You can see that the `-capi` suffix is added to the resource name, which effectively allows us to filter CAPI clusters from all Rancher clusters (which may be provisioned using other mechanisms).
++
+[source,bash]
+----
+ NAME READY KUBECONFIG
+ cluster1-capi
+----
+. `v3` cluster resources are assigned a randomly generated name and we need to find the resource that is linked to the `v1` cluster we retrieved in the previous step. We will do this by exploring the values of the resource, specifically `status.clusterName`.
++
+[source,bash]
+----
+kubectl get clusters.provisioning.cattle.io -n capi-ns cluster1-capi -o jsonpath='{.status.clusterName}'
+----
++
+The resource we get is `c-m-xyz1a2b3`, which is the one we'll be labelling.
+
+. Now that we know what resource to label, we can focus on what labels are required. These labels are used by the new controller to watch cluster resources.
++
+[source,bash]
+----
+cluster-api.cattle.io/capi-cluster-owner # name of the cluster
+cluster-api.cattle.io/capi-cluster-owner-ns # name of the namespace where the cluster lives
+----
++
+
+[CAUTION]
+====
+Note that the `-capi` suffix must be stripped when assigning the cluster name to `cluster-api.cattle.io/capi-cluster-owner`.
+====
+
+ a. Edit the resource and apply the labels.
++
+[source,bash]
+----
+ kubectl label clusters.management.cattle.io c-m-xyz1a2b3 cluster-api.cattle.io/capi-cluster-owner=cluster1
+ kubectl label clusters.management.cattle.io c-m-xyz1a2b3 cluster-api.cattle.io/capi-cluster-owner-ns=capi-ns
+----
++
+b. You can now validate that the changes have been successfully applied.
++
+[source,bash]
+----
+ kubectl get clusters.management.cattle.io c-m-xyz1a2b3 -o jsonpath='{.metadata.labels}'
+----
+
+. After the `clusters.management.cattle.io` `v3` resource has been updated, it is very important to mark the `clusters.provisioning.cattle.io` `v1` resource as migrated, so the controller behaves as expected. We do this by adding the annotation `cluster-api.cattle.io/migrated`
++
+[source,bash]
+----
+kubectl annotate clusters.provisioning.cattle.io -n capi-ns cluster1-capi cluster-api.cattle.io/migrated=true
+----
diff --git a/docs/v0.15.0/modules/en/pages/tasks/provider-certification/intro.adoc b/docs/v0.15.0/modules/en/pages/tasks/provider-certification/intro.adoc
new file mode 100644
index 00000000..5d4f6644
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/tasks/provider-certification/intro.adoc
@@ -0,0 +1,27 @@
+= What is a Certified Provider?
+:sidebar_position: 1
+
+As most CAPI providers are upstream projects maintained by the open community, there is no safe way to guarantee that any clusters provisioned with a given provider can be imported into Rancher via Turtles. However, we do implement a certification process for those providers that:
+
+* Are actively tested as part of our E2E suites.
+* Are covered by tests that are kept up-to-date to validate recent versions of the provider.
+* Satisfy the prerequisites in the certification process.
+
+== Certify your custom provider
+
+Additionally, if you are a provider developer or want to use a provider that is not listed as certified, you can xref:../../reference-guides/test-suite/intro.adoc[reuse] the existing Turtles E2E suite, either to get started with a certification request or simply to verify that Turtles is a viable solution for using Rancher and CAPI together. You can read about the certification process and requirements in the xref:./process.adoc[Provider Certification Guide].
+
+[discrete]
+==== Why would I want to validate my provider?
+
+The number of CAPI providers keeps growing and the community is coming up with new projects for different infrastructure hosts. This openness helps enrich the CAPI ecosystem, but it makes it impossible to control and test all providers Turtles will work with across the whole CAPI project. The developers of Turtles will maintain a xref:../../reference-guides/providers/certified.adoc[list of certified providers] that are actively tested and validated by CI mechanisms, but this list will be limited to key and well-known providers. That is why we encourage users of providers not listed to reuse Turtles' test suite and validate the integration between the provider and Turtles.
+
+[discrete]
+==== Can I use Turtles with an uncertified provider?
+
+Turtles aims to be agnostic and to integrate with the whole CAPI ecosystem, and you are free to use it without validating any providers. Chances are you will find no issues with most upstream projects.
+
+[discrete]
+==== What is the difference between certified and supported?
+
+As CAPI providers are projects maintained by the Kubernetes community, we cannot guarantee support for a given provider. This is why we focus on continuous validation of xref:../../reference-guides/providers/certified.adoc[Certified Providers] and offer users the possibility of integrating with the existing test suite to verify their providers of choice.
diff --git a/docs/v0.15.0/modules/en/pages/tasks/provider-certification/process.adoc b/docs/v0.15.0/modules/en/pages/tasks/provider-certification/process.adoc
new file mode 100644
index 00000000..d4b7afa0
--- /dev/null
+++ b/docs/v0.15.0/modules/en/pages/tasks/provider-certification/process.adoc
@@ -0,0 +1,37 @@
+= Provider Certification Guide
+:sidebar_position: 1
+
+The process of certification is based on verifying {product_name} integration with CAPI providers. To simplify this task, we prepared a generic test that validates the provisioning and importing of a downstream CAPI cluster.
+
+[TIP]
+====
+We recommend you refer to this https://github.com/rancher-sandbox/turtles-integration-suite-example[example] on how to use Turtles' test suite.
+====
+
+
+== Test & Certify your provider
+
+The first step in validating that your provider is compatible with Turtles and that you can provision CAPI clusters and import them into Rancher via Turtles is to integrate with our test suite. We provide a repository with an https://github.com/rancher-sandbox/turtles-integration-suite-example[integration example] that you can use as a reference for your integration.
+
+Turtles contains a https://github.com/rancher/turtles/tree/main/test/e2e/suites[number of suites] to verify different features and processes but, for provider certification, we require you to run only one test, which uses a GitOps flow. Turtles integrates well with a GitOps approach to cluster provisioning, which is why this is our primary way of validating provider integration with Rancher. Running the full suite for a given CAPI provider will:
+
+* Create a management cluster in the desired environment.
+* Install Rancher and Turtles with all prerequisites.
+* Install Gitea.
+* Run the suite, which will create a Git repository, apply the cluster template using Fleet, and verify that the cluster is created and successfully imported into Rancher.
+
+=== Test configuration
+
+To successfully run the test suite, you will have to provide a number of environment variables. Some of these are agnostic, meaning they are required for any provider you want to test, while others are specific to the provider you are validating. Please be aware of the particular requirements of the provider being tested, such as credentials, endpoints, etc.
+
+[TIP]
+====
+Next, we recommend you read the xref:../../reference-guides/test-suite/usage.adoc[Test suite guide].
+====
+
+
+== Request for certification
+
+Integrating with Turtles' test suite and running checks on your provider of interest is enough to validate that it is compatible with {product_name}. As it is not feasible for us to continuously test every CAPI provider, this certification workflow will allow you as a user to verify the expected functionality. However, as we are not actively testing newer iterations of the provider (with newer versions of Turtles), the support and guarantee for the given provider is limited, and you will be responsible for validating future releases.
+
+If, after successfully running checks on your provider, you would like to request that it be added to the table of xref:../../reference-guides/providers/certified.adoc[Certified Providers], and hence to the project's periodic E2E suite, you can do so via a GitHub issue using the https://github.com/rancher/turtles/issues/new/choose[Request for Certification template]. The proposal will be reviewed by the community and we will decide on the feasibility of adding the provider to the certification matrix.
diff --git a/turtles-local-playbook.yml b/turtles-local-playbook.yml
index 29200663..8ba90e89 100644
--- a/turtles-local-playbook.yml
+++ b/turtles-local-playbook.yml
@@ -1,12 +1,12 @@
site:
title: Rancher Turtles
url: /
- start_page: v0.14@turtles:en:index.adoc
+ start_page: v0.15.0@turtles:en:index.adoc
content:
sources:
- url: ./
- start_paths: [docs/v0.14, docs/v0.13, docs/v0.12, docs/v0.11]
+ start_paths: [docs/v0.15.0, docs/v0.14, docs/v0.13, docs/v0.12, docs/v0.11]
# Description: SUSE UI bundle
ui: