forked from hhy5277/dataflux-func
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdocker-stack.example.yaml
More file actions
138 lines (127 loc) · 3.18 KB
/
docker-stack.example.yaml
File metadata and controls
138 lines (127 loc) · 3.18 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
# Notes:
# 1. Data is stored under `/usr/local/dataflux-func/`;
#    make sure the directories exist before deploying.
#    Reference command:
#      sudo mkdir -p /usr/local/dataflux-func/{data,data/extra-python-packages,mysql,redis}
#
# 2. Deploy with `docker stack`.
#    Reference command (assuming this config file is named "docker-stack.yaml"):
#      sudo docker stack deploy dataflux-func -c docker-stack.yaml
#
# 3. If the built-in MySQL component is not needed, remove the MYSQL content block.
# 4. If the built-in Redis component is not needed, remove the REDIS content block.
# 5. If installing in the default mode, remove the WORKER MINI content block.
# 6. If installing in the mini mode, remove the WORKER DEFAULT content block.
#
# NOTE(review): the angle-bracket placeholders (<MYSQL_IMAGE>, <REDIS_IMAGE>,
# <DATAFLUX_FUNC_IMAGE>, <INSTALL_DIR>, <MYSQL_PASSWORD>, <PORT>) must be
# replaced with concrete values before deployment.
version: '3.1'

services:
  # MYSQL START
  mysql:
    image: <MYSQL_IMAGE>
    labels:
      - mysql
    networks:
      - datafluxfunc
    volumes:
      # Persist MySQL data on the host.
      - "<INSTALL_DIR>/mysql:/var/lib/mysql"
    environment:
      - "MYSQL_ROOT_PASSWORD=<MYSQL_PASSWORD>"
      - "MYSQL_DATABASE=dataflux_func"
    # command: --innodb-large-prefix=on --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci
    command: --innodb-large-prefix=on --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci --performance-schema=off --table-open-cache=400
  # MYSQL END

  # REDIS START
  redis:
    image: <REDIS_IMAGE>
    labels:
      - redis
    networks:
      - datafluxfunc
    volumes:
      # Persist Redis data on the host.
      - "<INSTALL_DIR>/redis:/data"
  # REDIS END

  # WORKER DEFAULT START
  # Worker serving queue 0 only, with concurrency capped at 2.
  worker-0:
    image: <DATAFLUX_FUNC_IMAGE>
    labels:
      - worker-0
    volumes:
      - "<INSTALL_DIR>/data:/data"
    networks:
      - datafluxfunc
      - default
    environment:
      DFF__WORKER_CONCURRENCY: '2'
    command: ./run-worker-by-queue.sh 0

  # Worker serving queues 1-6 (default concurrency).
  worker-1-6:
    image: <DATAFLUX_FUNC_IMAGE>
    labels:
      - worker-1-6
    volumes:
      - "<INSTALL_DIR>/data:/data"
    networks:
      - datafluxfunc
      - default
    command: ./run-worker-by-queue.sh 1 2 3 4 5 6

  # Worker serving queue 7 only, with concurrency capped at 2.
  worker-7:
    image: <DATAFLUX_FUNC_IMAGE>
    labels:
      - worker-7
    volumes:
      - "<INSTALL_DIR>/data:/data"
    networks:
      - datafluxfunc
      - default
    environment:
      DFF__WORKER_CONCURRENCY: '2'
    command: ./run-worker-by-queue.sh 7

  # Worker serving queues 8-9 (default concurrency).
  worker-8-9:
    image: <DATAFLUX_FUNC_IMAGE>
    labels:
      - worker-8-9
    volumes:
      - "<INSTALL_DIR>/data:/data"
    networks:
      - datafluxfunc
      - default
    command: ./run-worker-by-queue.sh 8 9
  # WORKER DEFAULT END

  # WORKER MINI START
  # Single worker for the mini installation mode (runs ./run-worker.sh,
  # which presumably serves all queues — confirm against the image docs).
  worker:
    image: <DATAFLUX_FUNC_IMAGE>
    labels:
      - worker
    volumes:
      - "<INSTALL_DIR>/data:/data"
    networks:
      - datafluxfunc
      - default
    command: ./run-worker.sh
  # WORKER MINI END

  # Beat service (./run-beat.sh) — presumably the periodic-task scheduler;
  # confirm against the image docs.
  beat:
    image: <DATAFLUX_FUNC_IMAGE>
    labels:
      - beat
    volumes:
      - "<INSTALL_DIR>/data:/data"
    networks:
      - datafluxfunc
      - default
    command: ./run-beat.sh

  # Web server; the only service with a port published on the host.
  server:
    image: <DATAFLUX_FUNC_IMAGE>
    labels:
      - server
    volumes:
      - "<INSTALL_DIR>/data:/data"
    networks:
      - datafluxfunc
      - default
    ports:
      # "<host port>:<container port>" — the server listens on 8088 inside.
      # Keep the mapping quoted: unquoted HH:MM-looking values are parsed
      # as sexagesimal integers by YAML 1.1 loaders.
      - "<PORT>:8088"
    command: ./run-server.sh

networks:
  # Pre-existing bridge network on the host.
  default:
    external:
      name: bridge
  # Overlay network used for service-to-service traffic inside the stack.
  datafluxfunc:
    driver: overlay