docker-compose.yml
version: "2.1"
services:
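  # HDFS NameNode. Filesystem metadata is persisted to ./data/namenode on the
  # host. The healthcheck below overrides only the timing; the test command
  # itself is presumably the one baked into the bde2020 image.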
  namenode:
    image: bde2020/hadoop-namenode:1.1.0-hadoop2.8-java8
    container_name: namenode
    volumes:
      - ./data/namenode:/hadoop/dfs/name
    environment:
      - CLUSTER_NAME=test
      - CORE_CONF_fs_defaultFS=hdfs://namenode:8020
      - CORE_CONF_hadoop_http_staticuser_user=root
      - CORE_CONF_hadoop_proxyuser_hue_hosts=*
      - CORE_CONF_hadoop_proxyuser_hue_groups=*
      - HDFS_CONF_dfs_webhdfs_enabled=true
      - HDFS_CONF_dfs_permissions_enabled=false
    healthcheck:
      interval: 5s
      retries: 100
    networks:
      - spark-net
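  # HDFS DataNode. Block data lives in ./data/datanode; the service waits for
  # the NameNode's healthcheck to pass before starting.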
  datanode:
    image: bde2020/hadoop-datanode:1.1.0-hadoop2.8-java8
    container_name: datanode
    volumes:
      - ./data/datanode:/hadoop/dfs/data
    environment:
      - CORE_CONF_fs_defaultFS=hdfs://namenode:8020
    depends_on:
      namenode:
        condition: service_healthy
    healthcheck:
      interval: 5s
      retries: 100
    networks:
      - spark-net
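  # Spark master. Host port 8080 exposes the master web UI; 7077 is the
  # cluster-manager port that workers and drivers connect to.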
  spark-master:
    image: bde2020/spark-master:2.3.1-hadoop2.8
    container_name: spark-master
    ports:
      - "8080:8080"
      - "7077:7077"
    environment:
      - CORE_CONF_fs_defaultFS=hdfs://namenode:8020
    depends_on:
      - namenode
      - datanode
    networks:
      - spark-net
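  # Spark worker that registers with the master above. Note that the fixed
  # container_name limits this to a single worker; dropping it would allow
  # something like `docker-compose up --scale spark-worker=3`.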
  spark-worker:
    image: bde2020/spark-worker:2.3.1-hadoop2.8
    container_name: spark-worker
    environment:
      - SPARK_MASTER=spark://spark-master:7077
      - CORE_CONF_fs_defaultFS=hdfs://namenode:8020
    depends_on:
      - spark-master
    networks:
      - spark-net
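  # Hue HDFS file browser, reachable on host port 8088.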
  hue:
    image: bde2020/hdfs-filebrowser:3.11
    ports:
      - "8088:8088"
    environment:
      - NAMENODE_HOST=namenode
      - SPARK_MASTER=spark://spark-master:7077
    depends_on:
      - spark-master
      - namenode
    networks:
      - spark-net
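  # Zeppelin notebook server on host port 80. Notebooks and the SANSA example
  # jars are mounted from the host so they survive container restarts.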
  zeppelin:
    image: bde2020/zeppelin:0.8.0-hadoop-2.8.0-spark-2.3.1
    ports:
      - "80:8080"
    volumes:
      - ./notebook:/opt/zeppelin/notebook
      - ./examples:/opt/sansa-examples
    environment:
      CORE_CONF_fs_defaultFS: "hdfs://namenode:8020"
      SPARK_MASTER: "spark://spark-master:7077"
      MASTER: "spark://spark-master:7077"
      # Passing --conf spark.kryo.registrator twice would make the second value
      # silently override the first; Spark takes a comma-separated list of
      # registrators, so both are combined into one setting here.
      SPARK_SUBMIT_OPTIONS: "--jars /opt/sansa-examples/jars/sansa-examples-spark.jar --conf spark.serializer=org.apache.spark.serializer.KryoSerializer --conf spark.kryo.registrator=org.datasyslab.geospark.serde.GeoSparkKryoRegistrator,net.sansa_stack.owl.spark.dataset.UnmodifiableCollectionKryoRegistrator"
    depends_on:
      - spark-master
      - namenode
    networks:
      - spark-net
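# All services share one user-defined network so they can resolve each other
# by service name (namenode, spark-master, ...).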
networks:
  spark-net:
    external:
      name: spark-net
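# spark-net is declared external, so it must exist before the stack is started.
# A minimal invocation (assuming Docker Compose v1, which understands the 2.1
# file format) might be:
#
#   docker network create spark-net
#   docker-compose up -d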