forked from RedHatInsights/playbook-dispatcher
-
Notifications
You must be signed in to change notification settings - Fork 0
/
docker-compose.yml
142 lines (132 loc) · 4.08 KB
/
docker-compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
# Copied from insights-ingress-go
# This podman compose file stands up local dependencies for
# Kafka, Zookeeper, Minio and insights-ingress-go.
# Please consult its README.md for bucket creation steps
# NOTE(review): the top-level `version` key is obsolete in Compose v2 and
# ignored by it; kept here for older docker-compose/podman-compose binaries.
version: "3"
services:
dispatcher:
image: quay.io/cloudservices/playbook-dispatcher
build:
context: .
links:
- kafka
- db
ports:
- '8000:8000'
- '9001:9001'
entrypoint:
- /bin/sh
- -c
- '/app migrate up && /app run'
environment:
CLOWDER_ENABLED: "false"
DB_HOST: "db"
PSK_AUTH_TEST: "xwKhCUzgJ8"
restart: unless-stopped
zookeeper:
image: confluentinc/cp-zookeeper
environment:
- ZOOKEEPER_CLIENT_PORT=32181
- ZOOKEEPER_SERVER_ID=1
kafka:
image: confluentinc/cp-kafka
ports:
- '29092:29092'
depends_on:
- zookeeper
environment:
- KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:29092
- KAFKA_BROKER_ID=1
- KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1
- KAFKA_ZOOKEEPER_CONNECT=zookeeper:32181
- KAFKA_AUTO_CREATE_TOPICS_ENABLE=true
  # S3-compatible object storage; ingress points at minio:9000 for staged
  # upload payloads (see INGRESS_MINIOENDPOINT below).
  minio:
    image: minio/minio
    command: server /data --console-address ":10000"
    volumes:
      # These vars are defined in .env
      # These are configurable
      # Ensure the directories exist prior to running this file
      - minio_conf:/root/.minio:Z
      - minio_data:/data:Z
    ports:
      - '9000:9000'
      - '10000:10000'
    environment:
      # NOTE(review): MINIO_ACCESS_KEY/MINIO_SECRET_KEY are deprecated
      # upstream in favor of MINIO_ROOT_USER/MINIO_ROOT_PASSWORD; still
      # honored by older images — confirm before bumping the image tag.
      # $MINIO_ACCESS_KEY / $MINIO_SECRET_KEY are interpolated from .env.
      - MINIO_ACCESS_KEY=$MINIO_ACCESS_KEY
      - MINIO_SECRET_KEY=$MINIO_SECRET_KEY
  # One-shot setup job: registers an mc alias for the minio service, then
  # creates the upload bucket and opens it for anonymous uploads.
  # `restart: on-failure` makes it retry until minio accepts connections.
  minio-createbuckets:
    image: minio/mc
    depends_on:
      - minio
    restart: on-failure
    # Folded scalar: the inner double quotes are literal and become shell
    # quoting around the compose-interpolated $MINIO_* credentials.
    entrypoint: >
      /bin/sh -c "
      /usr/bin/mc config host add myminio http://minio:9000 "$MINIO_ACCESS_KEY" "$MINIO_SECRET_KEY" || exit 1;
      /usr/bin/mc mb --ignore-existing myminio/insights-upload-perma;
      /usr/bin/mc policy set upload myminio/insights-upload-perma;
      "
ingress:
image: quay.io/cloudservices/insights-ingress:1e56a78
ports:
- '8080:3000'
environment:
- INGRESS_STAGEBUCKET=insights-upload-perma
- INGRESS_VALIDTOPICS=playbook,playbook-sat
- OPENSHIFT_BUILD_COMMIT=somestring
- INGRESS_MAXSIZE=104857600
- INGRESS_MINIODEV=true
- INGRESS_MINIOACCESSKEY=$MINIO_ACCESS_KEY
- INGRESS_MINIOSECRETKEY=$MINIO_SECRET_KEY
- INGRESS_MINIOENDPOINT=minio:9000
depends_on:
- kafka
  # PostgreSQL for the dispatcher (DB_HOST: "db" above). The debezium
  # postgres image is used — presumably because the `connect` service
  # captures changes from this database; confirm plugin requirements.
  db:
    image: quay.io/debezium/postgres:12
    restart: always
    environment:
      # Dev-only credentials; all three default to "insights".
      POSTGRES_PASSWORD: insights
      POSTGRES_USER: insights
      POSTGRES_DB: insights
    ports:
      - "5432:5432"
connect:
build:
context: .
dockerfile: event-streams/Dockerfile
image: quay.io/cloudservices/playbook-dispatcher-connect
links:
- kafka
- db
ports:
- 8083:8083
environment:
KAFKA_CONNECT_BOOTSTRAP_SERVERS: kafka:29092
KAFKA_CONNECT_CONFIGURATION: |
group.id=playbook-dispatcher-connect
key.converter=org.apache.kafka.connect.json.JsonConverter
value.converter=org.apache.kafka.connect.json.JsonConverter
offset.storage.topic=playbook-dispatcher-connect-config
offset.storage.replication.factor=1
offset.storage.partitions=1
status.storage.topic=playbook-dispatcher-connect-status
status.storage.replication.factor=1
status.storage.partitions=1
config.storage.topic=playbook-dispatcher-connect-offsets
config.storage.replication.factor=1
config.storage.partitions=1
config.providers: file
config.providers.file.class: com.redhat.insights.kafka.config.providers.PlainFileConfigProvider
KAFKA_CONNECT_METRICS_ENABLED: "false"
STRIMZI_KAFKA_GC_LOG_ENABLED: "false"
KAFKA_HEAP_OPTS: "-Xms512m -Xmx512m"
command: /opt/kafka/kafka_connect_run.sh
connect-start:
image: quay.io/cloudservices/playbook-dispatcher-connect
links:
- connect
command: "curl -f -i -H 'Content-Type:application/json' -X POST connect:8083/connectors/ -d @/connector-local.json"
restart: on-failure
# Named volumes backing the minio service (see its `volumes:` section).
volumes:
  minio_conf: {}
  minio_data: {}