Skip to content

Commit

Permalink
[CE-370] Make ansible agent run on existing k8s
Browse files Browse the repository at this point in the history
The Ansible agent currently can only deploy fabric onto
the k8s environment it sets up. This patch will allow a
user to use any existing k8s environment to stand up
a fabric network.

Change-Id: Iad486f5d2802fa93d1ddd24358bca512c3859643
Signed-off-by: Tong Li <litong01@us.ibm.com>
Co-authored-by: Surya <suryalnvs@gmail.com>
  • Loading branch information
Tong Li and suryalnvs committed May 23, 2018
1 parent 38f52a8 commit 88c7a6a
Show file tree
Hide file tree
Showing 15 changed files with 281 additions and 250 deletions.
25 changes: 25 additions & 0 deletions docs/worker_ansible_howto.md
Original file line number Diff line number Diff line change
Expand Up @@ -552,6 +552,31 @@ Ansible controller node. Group `etcdnodes` should list all the servers that you
etcd services on. Group `builders` should contain just one server that you wish to use to build
Hyperledger Fabric artifacts such as executables and docker images.

## <a name="use-the-existing-k8s"></a>Using existing k8s cluster

The Ansible agent allows you to deploy a fabric network onto an existing k8s cluster. To do
that, you only need to place your k8s configuration file (and, if necessary, certificates) in
the Ansible agent vars directory; the kube configuration file must be named kubeconfig. Then
you can use exactly the same fabric network configuration files, such as bc1st.yml or
vb1st.yml, to stand up your own fabric network. You can use a sample configuration file like
bc1st.yml as-is, or use these samples as a starting point to create new ones. Then simply
run the following command to stand up your own fabric network.

```
ansible-playbook -e "mode=apply env=bc1st deploy_type=k8s" setupfabric.yml
```

The above command will use the vars/kubeconfig file to deploy fabric network defined in
vars/bc1st.yml file.

To destroy what you just created, run the following command:

```
ansible-playbook -e "mode=destroy env=bc1st deploy_type=k8s" setupfabric.yml
```

You can also choose to use the ansible agent container to run the above command.

## <a name="srrwy"></a>Required Ports And Security Considerations

When you work with the public cloud, it is important to open or close certain
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
---
{% set project_version = fabric.baseimage_tag %}
{% if allorgs | length > 0 %}
Organizations:
{% for org in allorgs %}
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
---
logging:
{% set project_version = fabric.baseimage_tag %}
{% if project_version | version_compare('1.1.0','>=') %}
level: info
{% else %}
Expand Down
229 changes: 92 additions & 137 deletions src/agent/ansible/roles/deploy_k8s/fabricsetup/tasks/apply.yml
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
---
- name: Setup and initialize variables
set_fact:
current_host: "{{ hostvars[inventory_hostname].inter_name }}"
fabricworkdir: "/opt/gopath/{{ env }}/fabric"
fabricpath: "/opt/gopath/{{ env }}/src/github.com/hyperledger/fabric"
gopath: "/opt/gopath/{{ env }}"
peers: "{{ [] }}"
orderers: "{{ [] }}"
Expand All @@ -23,7 +23,6 @@
allpeers: "{{ [] }}"
clihost: ""
filterstr: ""
k8s_server: "{{ hostvars[groups['allnodes'][0]].private_ip }}"

- name: Make sure that working directory exists and clean
file:
Expand Down Expand Up @@ -88,171 +87,127 @@
'name':item | replace('.', '-') }] }}
with_items: "{{ ordererls }}"

- name: Get peer container list
set_fact:
peers: |
{{ peers + [{'org':item.split('@')[1].split('.')[-1],
'name':item.split('@')[1] | replace('.', '-'),
'role':item.split('@')[0]}] }}
with_items: "{{ fabric.network[current_host].peers | default([]) }}"

- name: Get ca container list
set_fact:
cas: |
{{ cas + [{'org':item.split('.')[-1],
'name':item | replace('.', '-') }] }}
with_items: "{{ fabric.network[current_host].cas | default([]) }}"
- name: Query k8s storage class
command: >-
./kubectl --kubeconfig kubeconfig get storageclass
args:
chdir: "{{ playbook_dir }}/../../vars/"
register: classes

- name: Get orderer container list
- name: Set storage class
set_fact:
orderers: |
{{ orderers + [{'org':item.split('.')[-1],
'name':item | replace('.', '-') }] }}
with_items: "{{ fabric.network[current_host].orderers | default([]) }}"
storageclass: "{{ (classes.stderr.find('No resources found') >= 0) | ternary('', 'default') }}"

- name: Set zookeeper and kafka container list
set_fact:
zookeepers: "{{ fabric.network[current_host].zookeepers | default([]) }}"
kafkas: "{{ fabric.network[current_host].kafkas | default([]) }}"
- name: Create deployment files for certssetup and pvc
template:
src: "{{ playbook_dir }}/../deploy_k8s/fabricsetup/templates/{{ item }}.j2"
dest: "{{ playbook_dir }}/../../run/{{ item }}.yaml"
with_items:
- pvc
- certssetup

- name: Pull certificates from the builder machine
- name: Create persistent volume
command: >-
scp -i "/opt/gopath/id_rsa" -r -o "StrictHostKeyChecking no" "{{ fabric.
ssh_user }}@{{ hostvars[groups['builders'][0]].private_ip }}:{{ fabricworkdir }}/certs.tgz"
"{{ fabricworkdir }}/allcerts.tgz"
./kubectl --kubeconfig kubeconfig apply -f ../run/pvc.yaml
args:
chdir: "{{ playbook_dir }}/../../vars/"
tags: "createpvc"

- name: Pull container images from the builder machine
- name: Query PVC status
command: >-
scp -i "/opt/gopath/id_rsa" -r -o "StrictHostKeyChecking no" "{{ fabric.
ssh_user }}@{{ hostvars[groups['builders'][0]].private_ip }}:{{ fabricworkdir }}/images/fabricimages.tar"
"{{ fabricworkdir }}/fabricimages.tar"
when: fabric.baseimage_tag == '' and inventory_hostname not in groups['builders']

- name: Pull container version file from the build machine
./kubectl --kubeconfig kubeconfig get -o=custom-columns=STATUS:.status.phase
pvc fabriccerts
args:
chdir: "{{ playbook_dir }}/../../vars/"
register: pvcstatus
until: pvcstatus.stdout.find("Bound") >= 0
retries: 5
delay: 10
tags: "querypvc"

- name: Create certs volume initialization pod
command: >-
scp -i "/opt/gopath/id_rsa" -r -o "StrictHostKeyChecking no" "{{ fabric.
ssh_user }}@{{ hostvars[groups['builders'][0]].private_ip }}:{{ fabricworkdir }}/images/VERSION"
"{{ fabricworkdir }}/VERSION"
- stat:
path: "{{ fabricworkdir }}/fabricimages.tar"
register: imagepack

- name: Load all the docker images created by build machine
shell: >-
docker load -i {{ fabricworkdir }}/fabricimages.tar
when: imagepack.stat.exists == true and inventory_hostname not in groups['builders']
./kubectl --kubeconfig kubeconfig apply -f ../run/certssetup.yaml
args:
chdir: "{{ playbook_dir }}/../../vars/"
tags: "createcertspod"

- name: Find out the image tags
slurp:
src: "{{ fabricworkdir }}/VERSION"
register: imagetag

- name: Set image tag from the file
set_fact:
thetag: "{{ imagetag['content'] | b64decode }}"
helpertag: "{{ fabric.helper_tag }}"

- name: Unpack the certificates
unarchive:
src: "{{ fabricworkdir }}/allcerts.tgz"
dest: "{{ fabricworkdir }}/run"
remote_src: true

- name: Process private key files
template:
src: "{{ playbook_dir }}/../deploy_k8s/fabricsetup/templates/fabric-ca-server-config.j2"
dest: "{{ fabricworkdir }}/run/keyfiles/{{ item.org }}/ca/fabric-ca-server-config.yaml"
with_items: "{{ cas }}"

- name: Get the peer org list
- name: Query initialization container status
command: >-
./kubectl --kubeconfig kubeconfig get -o=custom-columns=STATUS:.status.phase
pod fabriccertspod
args:
chdir: "{{ playbook_dir }}/../../vars/"
register: pvcstatus
until: pvcstatus.stdout.find("Running") >= 0
retries: 5
delay: 10
tags: "querypvc"

- name: Set peer and orderer to run peer channel create command
set_fact:
peerorgs: "{{ peers | map(attribute='org') | list | unique | sort }}"
clipeer: "{{ allpeers | random }}"
cliorderer: "{{ allorderers | random }}"
when: peerls | length > 0

- name: Get all peer orgs
set_fact:
orgmembers: "{{ peers | map(attribute='org') | list | unique | sort | join(\".member' '\") | trim | replace(' ', ',') }}"
orgmembers: "{{ allpeers | map(attribute='org') | list | unique | sort | join(\".member' '\") | trim | replace(' ', ',') }}"

- name: Create k8s deployment files
- name: Create peer channel command script
template:
src: "{{ playbook_dir }}/../deploy_k8s/fabricsetup/templates/fabric-pod.j2"
dest: "{{ fabricworkdir }}/run/fabric-pod.yml"
src: "{{ playbook_dir }}/../deploy_k8s/fabricsetup/templates/dochannel.j2"
dest: "{{ fabricworkdir }}/keyfiles/dochannel.sh"
mode: "u=rx,g=rx"

- name: Get pod filter string from peers and orderes
set_fact:
filterstr : "{{ filterstr + ' -e k8s_' + item.name }}"
- name: Copy certs onto the persistent volume
command: >-
./kubectl --kubeconfig kubeconfig cp {{ item }} fabriccertspod:/fabriccerts
args:
chdir: "{{ playbook_dir }}/../../vars/"
with_items:
- "{{ peers }}"
- "{{ orderers }}"
- "{{ fabricworkdir }}/certs.tgz"
- "{{ fabricworkdir }}/keyfiles/dochannel.sh"
- "{{ playbook_dir }}/../deploy_k8s/fabricsetup/templates/firstcode.go"
tags: "placecerts"

- name: Get pod filter string from zookeepers and kafkas
set_fact:
filterstr : "{{ filterstr + ' -e k8s_' + item }}"
with_items:
- "{{ zookeepers }}"
- "{{ kafkas }}"
- "fabriccli"
- name: Untar certs.tgz in the Persistent volume
command: >-
./kubectl --kubeconfig kubeconfig exec fabriccertspod -c task-pv-container
-- tar -C /fabriccerts -xzvf /fabriccerts/certs.tgz
args:
chdir: "{{ playbook_dir }}/../../vars/"
tags: "untarcerts"

- name: Pull necessary container images from the docker hub
command: "docker pull {{ fabric.repo.url }}{{ item.name }}"
when: item.flag | length > 0 and fabric.baseimage_tag | length > 0
with_items:
- { name: "fabric-ca:{{ fabric.ca.image_tag | default(thetag) }}", flag: "{{ cas }}" }
- { name: "fabric-zookeeper:{{ helpertag }}", flag: "{{ zookeepers }}" }
- { name: "fabric-kafka:{{ helpertag }}", flag: "{{ kafkas }}" }
- { name: "fabric-couchdb:{{ helpertag }}", flag: "{{ peers }}" }
- { name: "fabric-orderer:{{ thetag }}", flag: "{{ orderers }}" }
- { name: "fabric-peer:{{ thetag }}", flag: "{{ peers }}" }
- { name: "fabric-ccenv:{{ thetag }}", flag: "{{ peers }}" }
- { name: "fabric-tools:{{ thetag }}", flag: "tools" }
tags: "pullimages"
- name: Removing the certs pod
command: >-
./kubectl --kubeconfig kubeconfig delete -f ../run/certssetup.yaml
args:
chdir: "{{ playbook_dir }}/../../vars/"
tags: "deletecertspod"

- name: Create k8s deployment files
template:
src: "{{ playbook_dir }}/../deploy_k8s/fabricsetup/templates/fabric-pod.j2"
dest: "{{ playbook_dir }}/../../vars/fabric-pod.yml"

- name: Start fabric pods
command: "/opt/fabric/bin/kubectl --server {{ k8s_server }}:8080 create -f {{ fabricworkdir }}/run/fabric-pod.yml"
command: "./kubectl --kubeconfig='kubeconfig' apply -f fabric-pod.yml"
args:
chdir: "{{ playbook_dir }}/../../vars/"
tags: "fabricup"

- name: Locate a host to run peer channel create command
set_fact:
clihost: "{{ item }}"
clipeer: "{{ allpeers | random }}"
cliorderer: "{{ allorderers | random }}"
when: peers | length > 0 and clihost == ""
with_items: "{{ groups['allnodes'] }}"

- name: Make sure that working directory exists and clean
file:
path: "{{ fabricworkdir }}/run/keyfiles/chaincode"
state: "directory"
mode: 0775
when: clihost == inventory_hostname

- name: Move chaincode to the server
copy:
src: "{{ playbook_dir }}/../deploy_k8s/fabricsetup/templates/firstcode.go"
dest: "{{ fabricworkdir }}/run/keyfiles/chaincode/firstcode.go"
mode: "u=rw,g=rw"
when: clihost == inventory_hostname

- name: Create peer channel command script
template:
src: "{{ playbook_dir }}/../deploy_k8s/fabricsetup/templates/dochannel.j2"
dest: "{{ fabricworkdir }}/run/keyfiles/dochannel.sh"
mode: "u=rx,g=rx"
when: clihost == inventory_hostname

- name: Create peer channel command k8s deployment file
template:
src: "{{ playbook_dir }}/../deploy_k8s/fabricsetup/templates/cli-k8s.j2"
dest: "{{ fabricworkdir }}/run/cli-k8s.yml"
when: clihost == inventory_hostname
dest: "{{ playbook_dir }}/../../vars/cli-k8s.yml"

- name: Wait for containers to be ready
pause:
seconds: 30

- name: Start fabriccli pod
command: >-
/opt/fabric/bin/kubectl --server {{ k8s_server }}:8080 create
-f {{ fabricworkdir }}/run/cli-k8s.yml
when: clihost == inventory_hostname
command: "./kubectl --kubeconfig='kubeconfig' apply -f cli-k8s.yml"
args:
chdir: "{{ playbook_dir }}/../../vars/"
tags: "cliup"
25 changes: 7 additions & 18 deletions src/agent/ansible/roles/deploy_k8s/fabricsetup/tasks/destroy.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,33 +2,22 @@
- name: Setup variables
set_fact:
fabricworkdir: "/opt/gopath/{{ env }}/fabric"
k8s_server: "{{ hostvars[groups['allnodes'][0]].private_ip }}"
clihost: "{{ groups['allnodes'][0] }}"

- name: Stop fabric pods
command: >-
/opt/fabric/bin/kubectl --server {{ k8s_server }}:8080 delete
-f {{ fabricworkdir }}/run/fabric-pod.yml
./kubectl --kubeconfig='kubeconfig' delete -f fabric-pod.yml
args:
chdir: "{{ playbook_dir }}/../../vars/"
tags: "fabricdown"

- name: Stop fabriccli pod
command: >-
/opt/fabric/bin/kubectl --server {{ k8s_server }}:8080 delete
-f {{ fabricworkdir }}/run/cli-k8s.yml
when: clihost == inventory_hostname
./kubectl --kubeconfig='kubeconfig' delete -f cli-k8s.yml
args:
chdir: "{{ playbook_dir }}/../../vars/"
tags: "clidown"

- name: Test if there are any chaincode container images
shell: docker images | grep "dev-*" | cat | awk '{print $1}'
register: images
no_log: true

- name: Remove these chaincode container images
shell: docker images | grep "dev-*" | cat | awk '{print $1}' | xargs docker rmi -f
when: images.stdout != ""
no_log: true

- name: Make sure that working directory is removed
file:
path: "{{ fabricworkdir }}/run"
path: "{{ fabricworkdir }}"
state: absent
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
---
kind: Pod
apiVersion: v1
metadata:
name: fabriccertspod
spec:
volumes:
- name: task-pv-storage
persistentVolumeClaim:
claimName: fabriccerts
containers:
- name: task-pv-container
image: busybox
args:
- sleep
- "3000"
volumeMounts:
- mountPath: "/fabriccerts"
name: task-pv-storage
Loading

0 comments on commit 88c7a6a

Please sign in to comment.