version: '3'

services:

  # this container creates certificates used by other services
  setup:
    build: ./data/setup/build/.
    container_name: "setup"
    restart: "no"
    hostname: setup
    volumes:
      - "./data:/data"
    networks:
      - setup-net
    healthcheck:
      test: ["CMD-SHELL", "test -f /tmp/healthcheck.txt"]
      interval: 10s
      timeout: 5s
      retries: 5
    logging:
      driver: "json-file"
      options:
        max-size: "50m"

  # avahi mdns broadcasts the name opensearch.local to make the dashboard accessible by this name in your browser
  mdns:
    build: ./data/mdns/build/.
    container_name: "mdns"
    restart: "no"
    hostname: mdns
    volumes:
      - "./data/mdns/config:/opt/config"
    network_mode: "host"
    logging:
      driver: "json-file"
      options:
        max-size: "50m"

  # reverse proxy used to accept http/https traffic and forward it to the containers
  traefik:
    image: "traefik:v2.9.1"
    container_name: "traefik"
    hostname: traefik
    restart: always
    depends_on:
      - setup
    command:
      #- "--log.level=DEBUG"
      - "--api.dashboard=true" # enable traefik dashboard
      - "--api.insecure=true" # URL for traefik dashboard = http://opensearch.local:8080/dashboard/ (needs ports: 8080 to be enabled)
      - "--global.sendAnonymousUsage=false"
      - "--providers.docker=true"
      - "--providers.docker.exposedbydefault=false"
      - "--entrypoints.http.address=:80"
      - "--entrypoints.https.address=:443"
      - "--providers.file.filename=/etc/traefik/encryption.toml"
      - "--providers.file.watch=true"
    labels:
      - traefik.enable=true
      - traefik.http.routers.traefik.rule=Host(`traefik.local`)
      - traefik.http.routers.traefik.tls=true
      - traefik.http.routers.traefik.entrypoints=https
      - traefik.http.routers.traefik.service=api@internal
      - traefik.http.routers.traefik.middlewares=traefik-auth-middleware
      - traefik.http.middlewares.traefik-auth-middleware.basicauth.users=admin:$$apr1$$QIHSR7rW$$fW5DzBnqnCbHP5L2k6kfY0 #admin:vagrant
      - traefik.http.services.traefik.loadbalancer.server.scheme=http
      - traefik.http.services.traefik.loadbalancer.server.port=8080
    networks:
      - traefik-net
    ports:
      - "80:80"
      - "443:443"
      #- "8080:8080"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ./data/traefik/config/encryption.toml:/etc/traefik/encryption.toml:ro
      - ./data/traefik/certs/:/etc/traefik/certs/:ro
    logging:
      driver: "json-file"
      options:
        max-size: "50m"
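  # NOTE: the mounted encryption.toml is generated outside this file; as a rough sketch
  # (the certificate filenames are assumptions), a traefik v2 file-provider dynamic
  # configuration pointing at the mounted certs could look like:
  #   [[tls.certificates]]
  #     certFile = "/etc/traefik/certs/opensearch.local.crt"
  #     keyFile  = "/etc/traefik/certs/opensearch.local.key"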
  # Opensearch two node cluster
  opensearch-node1:
    image: opensearchproject/opensearch:2.3.0
    container_name: opensearch-node1
    hostname: opensearch-node1
    restart: always
    depends_on:
      setup:
        condition: service_healthy
    environment:
      - cluster.name=opensearch-cluster
      - node.name=opensearch-node1
      - discovery.seed_hosts=opensearch-node1,opensearch-node2
      - cluster.initial_cluster_manager_nodes=opensearch-node1,opensearch-node2
      - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m"
      - plugins.security.ssl.transport.pemkey_filepath=certs/opensearch-node1.key
      - plugins.security.ssl.transport.pemcert_filepath=certs/opensearch-node1.pem
      - plugins.security.ssl.transport.pemtrustedcas_filepath=certs/opensearch-ca.pem
      - plugins.security.ssl.http.pemkey_filepath=certs/opensearch-node1.key
      - plugins.security.ssl.http.pemcert_filepath=certs/opensearch-node1.pem
      - plugins.security.ssl.http.pemtrustedcas_filepath=certs/opensearch-ca.pem
      - cluster.routing.allocation.disk.threshold_enabled=true
      - cluster.routing.allocation.disk.watermark.low=97%
      - cluster.routing.allocation.disk.watermark.high=98%
      - cluster.routing.allocation.disk.watermark.flood_stage=99%
      #- network.publish_host=192.168.57.2
      - DISABLE_INSTALL_DEMO_CONFIG=true
      - bootstrap.memory_lock=true
      - plugins.security.ssl.transport.enforce_hostname_verification=false
      - plugins.security.ssl.transport.resolve_hostname=false
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536 # maximum number of open files for the OpenSearch user, set to at least 65536 on modern systems
        hard: 65536
    volumes:
      - ./data/opensearch-node1/data/:/usr/share/opensearch/data
      - ./data/opensearch-node1/certs/:/usr/share/opensearch/config/certs:ro
      - ./data/opensearch-node1/config/opensearch.yml:/usr/share/opensearch/config/opensearch.yml:ro
      - ./data/opensearch-node1/config/internal_users.yml:/usr/share/opensearch/config/opensearch-security/internal_users.yml:ro
    #ports:
    #  - 9200:9200
    #  - 9600:9600 # required for Performance Analyzer
    networks:
      - opensearch-db-net
    logging:
      driver: "json-file"
      options:
        max-size: "50m"

  opensearch-node2:
    image: opensearchproject/opensearch:2.3.0
    container_name: opensearch-node2
    hostname: opensearch-node2
    restart: always
    depends_on:
      setup:
        condition: service_healthy
    environment:
      - cluster.name=opensearch-cluster
      - node.name=opensearch-node2
      - discovery.seed_hosts=opensearch-node1,opensearch-node2
      - cluster.initial_cluster_manager_nodes=opensearch-node1,opensearch-node2
      - "OPENSEARCH_JAVA_OPTS=-Xms512m -Xmx512m"
      - plugins.security.ssl.transport.pemkey_filepath=certs/opensearch-node2.key
      - plugins.security.ssl.transport.pemcert_filepath=certs/opensearch-node2.pem
      - plugins.security.ssl.transport.pemtrustedcas_filepath=certs/opensearch-ca.pem
      - plugins.security.ssl.http.pemkey_filepath=certs/opensearch-node2.key
      - plugins.security.ssl.http.pemcert_filepath=certs/opensearch-node2.pem
      - plugins.security.ssl.http.pemtrustedcas_filepath=certs/opensearch-ca.pem
      - cluster.routing.allocation.disk.threshold_enabled=true
      - cluster.routing.allocation.disk.watermark.low=97%
      - cluster.routing.allocation.disk.watermark.high=98%
      - cluster.routing.allocation.disk.watermark.flood_stage=99%
      #- network.publish_host=192.168.57.2
      - DISABLE_INSTALL_DEMO_CONFIG=true
      - bootstrap.memory_lock=true
      - plugins.security.ssl.transport.enforce_hostname_verification=false
      - plugins.security.ssl.transport.resolve_hostname=false
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536
        hard: 65536
    volumes:
      - ./data/opensearch-node2/data/:/usr/share/opensearch/data
      - ./data/opensearch-node2/certs/:/usr/share/opensearch/config/certs:ro
      - ./data/opensearch-node2/config/opensearch.yml:/usr/share/opensearch/config/opensearch.yml:ro
      - ./data/opensearch-node2/config/internal_users.yml:/usr/share/opensearch/config/opensearch-security/internal_users.yml:ro
    networks:
      - opensearch-db-net
    logging:
      driver: "json-file"
      options:
        max-size: "50m"

  # opensearch dashboards for search and dashboarding
  opensearch-dashboards:
    image: opensearchproject/opensearch-dashboards:2.3.0
    container_name: opensearch-dashboards
    hostname: opensearch-dashboards
    restart: always
    depends_on:
      setup:
        condition: service_healthy
      opensearch-node1:
        condition: service_started
      opensearch-node2:
        condition: service_started
    environment:
      OPENSEARCH_HOSTS: '["https://opensearch-node1:9200","https://opensearch-node2:9200"]'
      OPENSEARCH_USERNAME: "kibanaserver"
      OPENSEARCH_PASSWORD: "vagrant"
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.opensearch-dashboards.service=opensearch-dashboards"
      - "traefik.http.routers.opensearch-dashboards.entrypoints=https"
      - "traefik.http.routers.opensearch-dashboards.tls=true"
      - "traefik.http.routers.opensearch-dashboards.rule=Host(`opensearch.local`)"
      - "traefik.http.services.opensearch-dashboards.loadbalancer.server.port=5601"
      - "traefik.http.services.opensearch-dashboards.loadbalancer.server.scheme=http"
      - "traefik.docker.network=traefik-net"
    volumes:
      - ./data/opensearch-dashboards/certs/:/usr/share/opensearch-dashboards/config/certs:ro
    #ports:
    #  - 5601:5601
    expose:
      - "5601"
    networks:
      - setup-net
      - traefik-net
      - opensearch-db-net
    logging:
      driver: "json-file"
      options:
        max-size: "50m"
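  # NOTE: neither node publishes port 9200 to the host. As a quick sanity check (assuming
  # curl is available inside the image and using the admin credentials defined in the
  # mounted internal_users.yml), something like this should show the cluster status:
  #   docker exec opensearch-node1 curl -sk -u admin:<admin-password> "https://localhost:9200/_cluster/health?pretty"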
"traefik.docker.network=traefik-net" volumes: - ./data/opensearch-dashboards/certs/:/usr/share/opensearch-dashboards/config/certs:ro #ports: # - 5601:5601 expose: - "5601" networks: - setup-net - traefik-net - opensearch-db-net logging: driver: "json-file" options: max-size: "50m" # simple logstash listening on port 5044. Install winlogbeat, auditbeat, or packetbeat and send data to this container (5044/tcp -> logstash -> opensearch) beats-logstash: image: opensearchproject/logstash-oss-with-opensearch-output-plugin:8.4.0 container_name: beats-logstash hostname: beats-logstash restart: always depends_on: - opensearch-node1 environment: - OPENSEARCH_HOST=https://opensearch-node1:9200 - LOGSTASH_USER=logstash - LOGSTASH_PASSWORD=${LOGSTASH_PASSWORD:-vagrant} - OPENSEARCH_INDEX=logstash-beats volumes: - ./data/beats-logstash/config/logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro networks: - external-net - opensearch-db-net ports: - 5044:5044 logging: driver: "json-file" options: max-size: "50m" # uses filebeats modules to open syslog ports (network -> filebeat -> logstash -> opensearch) syslog-filebeat: image: elastic/filebeat:8.4.3 container_name: "syslog-filebeat" hostname: syslog-filebeat restart: always depends_on: - syslog-logstash environment: - LOGSTASH_HOST=syslog-logstash:5044 command: ["--strict.perms=false"] volumes: - ./data/syslog-filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml #- ./data/syslog-filebeat/data:/usr/share/filebeat/data # not needed for test environments networks: - external-net - syslog-net ports: - 514:514 # TCP input - 514:514/udp # UDP input - 9001:9001 # Cisco ASA - 9002:9002 # Cisco IOS - 9003:9003 # CEF - 9004:9004 # Checkpoint - 2055:2055 # NetFlow - 2055:2055/udp # NetFlow - 9532:9532 # Snort logging: driver: "json-file" options: max-size: "50m" syslog-logstash: image: opensearchproject/logstash-oss-with-opensearch-output-plugin:8.4.0 container_name: syslog-logstash hostname: syslog-logstash restart: always depends_on: - opensearch-node1 environment: - OPENSEARCH_HOST=https://opensearch-node1:9200 - LOGSTASH_USER=logstash - LOGSTASH_PASSWORD=${LOGSTASH_PASSWORD:-vagrant} - OPENSEARCH_INDEX=logstash-syslog volumes: - ./data/syslog-logstash/config/logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro networks: - syslog-net - opensearch-db-net expose: - "5044" logging: driver: "json-file" options: max-size: "50m" # api demo example. connects to coindesk free api every minute, uses jq as a parsing example, and sends it it through filesbeats to logstash (cron -> file -> filebeat -> logstash -> opensearch) apidemo-cron: build: ./data/apidemo-cron/build/. 
container_name: "apidemo-cron" hostname: apidemo-cron restart: always depends_on: - apidemo-filebeat environment: - SCHEDULE=* * * * * - USER=root - COMMAND=bash /opt/scripts/get_cryptocurrency.sh volumes: - ./data/apidemo-cron/scripts:/opt/scripts/ - ./data/apidemo-cron/output:/opt/output/ networks: - apidemo-net logging: driver: "json-file" options: max-size: "50m" apidemo-filebeat: image: elastic/filebeat:8.4.3 container_name: "apidemo-filebeat" hostname: apidemo-filebeat restart: always depends_on: - apidemo-logstash environment: - INPUT_PATH=/opt/input/*.json - LOGSTASH_HOST=apidemo-logstash:5044 command: ["--strict.perms=false"] volumes: - ./data/apidemo-filebeat/config/filebeat.yml:/usr/share/filebeat/filebeat.yml - ./data/apidemo-cron/output:/opt/input/ #- ./data/apidemo-filebeat/data:/usr/share/filebeat/data # not needed for test environments networks: - apidemo-net logging: driver: "json-file" options: max-size: "50m" apidemo-logstash: image: opensearchproject/logstash-oss-with-opensearch-output-plugin:8.4.0 container_name: apidemo-logstash hostname: apidemo-logstash restart: always depends_on: - opensearch-node1 environment: - OPENSEARCH_HOST=https://opensearch-node1:9200 - LOGSTASH_USER=logstash - LOGSTASH_PASSWORD=${LOGSTASH_PASSWORD:-vagrant} - OPENSEARCH_INDEX=logstash-demoapi volumes: - ./data/apidemo-logstash/config/logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro networks: - apidemo-net - opensearch-db-net expose: - "5044" logging: driver: "json-file" options: max-size: "50m" grafana: image: grafana/grafana container_name: grafana hostname: grafana restart: always user: root labels: - "traefik.enable=true" - "traefik.http.routers.grafana.service=grafana" - "traefik.http.routers.grafana.entrypoints=https" - "traefik.http.routers.grafana.tls=true" - "traefik.http.routers.grafana.rule=Host(`grafana.local`)" - "traefik.http.services.grafana.loadbalancer.server.port=3000" - "traefik.http.services.grafana.loadbalancer.server.scheme=http" - "traefik.docker.network=traefik-net" volumes: - ./data/grafana/data:/var/lib/grafana environment: default_timezone: 'Europe/Amsterdam' GF_INSTALL_PLUGINS: grafana-piechart-panel,grafana-clock-panel,grafana-simple-json-datasource,grafana-opensearch-datasource GF_SECURITY_ADMIN_USER: admin GF_SECURITY_ADMIN_PASSWORD: vagrant networks: - setup-net - traefik-net - opensearch-db-net expose: - 3000 #ports: # - 3000:3000 networks: setup-net: external-net: traefik-net: name: traefik-net opensearch-dashboards-net: opensearch-db-net: graylog-net: apidemo-net: syslog-net: