# Default values for hlf-kube.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

hyperledgerVersion: 1.4.3

# see the Raft sample in the README for how to enable TLS
tlsEnabled: false

# use actual domain names like peer0.atlantis.com instead of internal service names
# this should be set to true for TLS
useActualDomains: false

# adds additional DNS entries to /etc/hosts files
# see https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/#adding-additional-entries-with-hostaliases
# this value should be provided if either tlsEnabled or useActualDomains is set to true
# see the Raft sample in the README for how to use this
hostAliases: []
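
# a commented-out sketch of providing host aliases (the IP below is hypothetical;
# the entry format follows the Kubernetes hostAliases docs linked above):
# hostAliases:
#   - ip: "10.0.0.10"
#     hostnames:
#       - "peer0.atlantis.com"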

# common persistence settings
persistence:
  storageClass: default

backup:
  # initiate backup procedure?
  enabled: false

restore:
  # initiate restore procedure?
  enabled: false

# common ingress settings
ingress:
  # all ingress subdomains will be created under this domain
  parentDomain:
  annotations:
    kubernetes.io/ssl-redirect: "true"
    certmanager.k8s.io/cluster-issuer: letsencrypt-prod
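# a commented-out sketch (example.com is a hypothetical domain; the exact
# subdomain names are determined by the chart's templates):
# ingress:
#   parentDomain: example.com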

# peer settings. applies to all peers
peer:
  logLevel: debug
  # launch peer pods? setting to false is useful for collecting host aliases and fast restart afterwards
  launchPods: true
  chaincode:
    logging:
      level: info
      shim: info
  persistence:
    enabled: false
    size: 16Gi
  backup:
    # take backup of peers during backup procedure?
    enabled: true
  restore:
    # restore peers' data from backup during restore procedure?
    enabled: true
  operations:
    enabled: false
    metrics:
      provider: prometheus
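  # note: with operations enabled, Fabric 1.4 peers expose an operations service
  # serving health checks and, when provider is prometheus, a /metrics endpoint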

# CouchDB settings. applies to all CouchDB instances
couchdb:
  version: 0.4.15
  userName:
  password:
  persistence:
    enabled: false
    size: 16Gi
  ingress:
    enabled: false
  backup:
    # take backup of CouchDB instances during backup procedure?
    enabled: true
  restore:
    # restore CouchDB data from backup during restore procedure?
    enabled: true

# Orderer settings. applies to all Orderer pods
orderer:
  # should be greater than 1 only if kafka orderer is used
  replicas: 1
  logLevel: info
  # launch orderer pods? setting to false is useful for collecting host aliases and fast restart afterwards
  launchPods: true
  persistence:
    enabled: false
    size: 16Gi
  backup:
    # take backup of orderers during backup procedure?
    enabled: true
  restore:
    # restore orderers' data from backup during restore procedure?
    enabled: true

# CA (Certificate Authority) settings. applies to all CAs
ca:
  userName: admin
  password: adminpw
  logLevel: info
  ingress:
    enabled: false

# kafka settings
hlf-kafka:
  # install kafka?
  enabled: false
  # number of Kafka brokers, should be at least 4
  # https://hyperledger-fabric.readthedocs.io/en/release-1.4/kafka.html
  replicas: 4
  podManagementPolicy: Parallel
  # TODO storage class?
  persistence:
    enabled: false
    storageClass: default
    size: 16Gi
  configurationOverrides:
    "default.replication.factor": 4 # given a 4-node Kafka cluster
    "unclean.leader.election.enable": false
    "min.insync.replicas": 3 # to permit one Kafka replica to go offline
    "message.max.bytes": "103809024" # 99 * 1024 * 1024 B
    "replica.fetch.max.bytes": "103809024" # 99 * 1024 * 1024 B
    "log.retention.ms": -1 # since we need to keep logs indefinitely for the HL Fabric Orderer
  zookeeper:
    # should be 3, 5, or 7
    replicaCount: 3
    persistence:
      enabled: false
      storageClass: default
      size: 16Gi
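
# a minimal install sketch (commented out; the chart path and release name are
# assumptions, and the --name flag assumes Helm 2 era tooling):
#   helm install ./hlf-kube --name hlf-kube
# individual values can be overridden at install time, for example:
#   helm install ./hlf-kube --name hlf-kube --set tlsEnabled=true,useActualDomains=true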