added Malcolm

Vagrant/resources/malcolm/shared/bin/agg-init.sh  (new executable file, 49 lines)
@@ -0,0 +1,49 @@
#!/bin/bash

# Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved.

SCRIPT_PATH="$(dirname "$(realpath -e "${BASH_SOURCE[0]}")")"

echo "aggregator" > /etc/installer

if [[ -r "$SCRIPT_PATH"/common-init.sh ]]; then
  . "$SCRIPT_PATH"/common-init.sh

  # remove default accounts/groups we don't want; create/set directories for non-login system accounts so STIG checks don't complain
  CleanDefaultAccounts

  MAIN_USER="$(id -nu 1000)"
  if [[ -n $MAIN_USER ]]; then

    # fix some permissions to make sure things belong to the right person
    FixPermissions "$MAIN_USER"

    # if Malcolm's config file has never been touched, configure it now
    MAIN_USER_HOME="$(getent passwd "$MAIN_USER" | cut -d: -f6)"
    if [[ -f "$MAIN_USER_HOME"/Malcolm/firstrun ]]; then
      if [[ -r "$MAIN_USER_HOME"/Malcolm/scripts/install.py ]]; then
        /usr/bin/env python3 "$MAIN_USER_HOME"/Malcolm/scripts/install.py --configure --defaults --restart-malcolm
      fi
      rm -f "$MAIN_USER_HOME"/Malcolm/firstrun
    fi

    # make sure read permission is set correctly for the nginx worker processes
    chmod 644 "$MAIN_USER_HOME"/Malcolm/nginx/htpasswd "$MAIN_USER_HOME"/Malcolm/htadmin/config.ini "$MAIN_USER_HOME"/Malcolm/htadmin/metadata >/dev/null 2>&1
  fi

  # we're going to let wicd manage networking on the aggregator, so remove physical interfaces from /etc/network/interfaces
  InitializeAggregatorNetworking

  # block some call-homes
  BadTelemetry

  # if we need to import prebuilt Malcolm docker images, do so now (but not if we're in a live-usb boot)
  DOCKER_DRIVER="$(docker info 2>/dev/null | grep 'Storage Driver' | cut -d' ' -f3)"
  if [[ -n $DOCKER_DRIVER ]] && [[ "$DOCKER_DRIVER" != "vfs" ]] && [[ -r /malcolm_images.tar.gz ]]; then
    docker load -q -i /malcolm_images.tar.gz && rm -f /malcolm_images.tar.gz
  fi

  exit 0
else
  exit 1
fi
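
Note: the first-run block above just invokes Malcolm's own installer non-interactively; a sketch
of the equivalent manual invocation (assuming, as above, a Malcolm checkout in the user's home
directory):

    # configure Malcolm with default settings and (re)start its services
    /usr/bin/env python3 ~/Malcolm/scripts/install.py --configure --defaults --restart-malcolm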

Vagrant/resources/malcolm/shared/bin/beat-log-temperature.py  (new executable file, 76 lines)
@@ -0,0 +1,76 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved.

import os
import re
import sys
import time
import argparse
from functools import reduce
from sensorcommon import *
from sensormetric import *
from collections import defaultdict

BEAT_PORT_DEFAULT=9515
BEAT_INTERFACE_IP="127.0.0.1"
BEAT_PROTOCOL="udp"
BEAT_FORMAT="json"

###################################################################################################
###################################################################################################
def main():

  # extract arguments from the command line
  # print (sys.argv[1:]);
  parser = argparse.ArgumentParser(description='beat-log-temperature.py', add_help=False, usage='beat-log-temperature.py [options]')
  parser.add_argument('-p', '--port', dest='port', metavar='<INT>', type=int, nargs='?', default=BEAT_PORT_DEFAULT, help='UDP port monitored by protologbeat')
  parser.add_argument('-c', '--count', dest='loop', metavar='<INT>', type=int, nargs='?', default=1, help='Number of times to execute (default = 1, 0 = loop forever)')
  parser.add_argument('-s', '--sleep', dest='sleep', metavar='<INT>', type=int, nargs='?', default=10, help='Seconds between iterations if looping (default = 10)')
  parser.add_argument('-v', '--verbose', dest='debug', type=str2bool, nargs='?', const=True, default=False, help="Verbose output")
  try:
    parser.error = parser.exit
    args = parser.parse_args()
  except SystemExit:
    parser.print_help()
    exit(2)

  # set up destination beat
  eprint(f"Logging {BEAT_FORMAT} sensor statistics to {BEAT_INTERFACE_IP}:{args.port} over {BEAT_PROTOCOL}")
  beat = HeatBeatLogger(BEAT_INTERFACE_IP, args.port, BEAT_PROTOCOL, BEAT_FORMAT)

  loopCount = 0
  while (args.loop <= 0) or (loopCount < args.loop):

    if (loopCount >= 1):
      time.sleep(args.sleep)
    loopCount += 1

    metrics = get_metrics_list()
    metrics_dicts = [x.to_dictionary() for x in metrics]
    for d in metrics_dicts:
      d.pop('value_type', None)

    # get averages for each metric class
    metric_class_values = defaultdict(list)

    # put together a list for each class of metric for averaging
    for metric_dict in metrics_dicts:
      label_class = metric_dict["class"]
      if (len(label_class) > 0):
        metric_class_values[label_class].append(metric_dict["value"])

    # average each metric class
    message = {}
    for k, v in metric_class_values.items():
      message[f"{k}_avg"] = reduce(lambda a, b: a + b, v) / len(v)

    # send the message
    message['sensors'] = metrics_dicts
    if args.debug:
      eprint(f"Message: {message}")
    beat.send_message(message)

if __name__ == '__main__':
  main()
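
A usage sketch for the script above (flags per its argparse definitions; the port must match the
one protologbeat is listening on):

    # send sensor metrics to protologbeat on UDP 9515 every 30 seconds, looping forever
    ./beat-log-temperature.py --port 9515 --count 0 --sleep 30 --verbose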

Vagrant/resources/malcolm/shared/bin/capture-format-wait.sh  (new executable file, 21 lines)
@@ -0,0 +1,21 @@
#!/bin/bash

# Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved.

function finish {
  pkill -f "zenity.*Preparing Storage"
}

if [ -f /etc/capture_storage_format.crypt ]; then
  CAPTURE_STORAGE_FORMAT_FILE="/etc/capture_storage_format.crypt"
else
  CAPTURE_STORAGE_FORMAT_FILE="/etc/capture_storage_format"
fi

if [[ -f "$CAPTURE_STORAGE_FORMAT_FILE" ]] || pgrep -f "sensor-capture-disk-config.py" >/dev/null 2>&1; then
  trap finish EXIT
  yes | zenity --progress --pulsate --no-cancel --auto-close --text "Capture storage media are being prepared..." --title "Preparing Storage" &
  while [[ -f "$CAPTURE_STORAGE_FORMAT_FILE" ]] || pgrep -f "sensor-capture-disk-config.py" >/dev/null 2>&1; do
    sleep 2
  done
fi
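
The gate condition above can be checked by hand the same way the script polls it; a minimal
sketch:

    # exits 0 while capture storage is still being prepared
    [[ -f /etc/capture_storage_format ]] || pgrep -f "sensor-capture-disk-config.py"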

Vagrant/resources/malcolm/shared/bin/common-init.sh  (new executable file, 130 lines)
@@ -0,0 +1,130 @@
#!/bin/bash

# Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved.

declare -A IFACES

# read all non-virtual interfaces and their speeds into an associative array
function PopulateInterfaces()
{
  IFACE_RESULT=""
  SPEED_MAX=0
  while IFS='' read -r -d ' ' IFACE_NAME && IFS='' read -r -d '' IFACE_LINK; do
    if [[ "${IFACE_LINK}" != *"virtual"* ]]; then
      IFACE_SPEED="$(cat "/sys/class/net/$IFACE_NAME/speed" 2>/dev/null)"
      if [[ -n $IFACE_SPEED ]]; then
        IFACES[$IFACE_NAME]+=$IFACE_SPEED
      else
        IFACES[$IFACE_NAME]+=0
      fi
    fi
  done < <(find /sys/class/net/ -mindepth 1 -maxdepth 1 -type l -printf '%P %l\0' 2>/dev/null)
}

# the capture interface is the one with the highest "speed"
function DetermineCaptureInterface()
{
  for IFACE_NAME in "${!IFACES[@]}"; do
    echo "$IFACE_NAME" "${IFACES["$IFACE_NAME"]}"
  done | sort -rn -k2 | head -n 1 | cut -d' ' -f1
}

# remove default accounts/groups we don't want; create/set directories for non-login system accounts so STIG checks don't complain
function CleanDefaultAccounts() {
  for systemuser in games gnats irc list lp news www-data
  do
    deluser $systemuser 2>/dev/null || true
  done
  [ ! -d /var/lib/nobody ] && ((mkdir -p /var/lib/nobody && chown nobody:nogroup /var/lib/nobody && chmod 700 /var/lib/nobody && usermod -m -d /var/lib/nobody nobody) || true)
  [ ! -d /var/lib/_apt ] && ((mkdir -p /var/lib/_apt && chown _apt:nogroup /var/lib/_apt && chmod 700 /var/lib/_apt && usermod -m -d /var/lib/_apt _apt) || true)
  [ ! -d /run/systemd/resolve ] && ((mkdir -p /run/systemd/resolve && chown systemd-resolve:systemd-resolve /run/systemd/resolve && chmod 700 /run/systemd/resolve) || true)
  [ ! -d /var/lib/usbmux ] && ((mkdir -p /var/lib/usbmux && chown usbmux:plugdev /var/lib/usbmux && chmod 700 /var/lib/usbmux) || true)
  [ ! -d /var/lib/ntp ] && ((mkdir -p /var/lib/ntp && chown ntp:ntp /var/lib/ntp && chmod 700 /var/lib/ntp) || true)
  ((mkdir -p /var/lib/systemd-coredump && chown systemd-coredump:nogroup /var/lib/systemd-coredump && chmod 700 /var/lib/systemd-coredump && usermod -m -d /var/lib/systemd-coredump systemd-coredump) || true)
  chmod 600 "/etc/crontab" "/etc/group-" "/etc/gshadow-" "/etc/passwd-" "/etc/shadow-" >/dev/null 2>&1 || true
  chmod 700 "/etc/cron.hourly" "/etc/cron.daily" "/etc/cron.weekly" "/etc/cron.monthly" "/etc/cron.d" >/dev/null 2>&1 || true
}

# if the network configuration files for the interfaces haven't been set to come up on boot, configure that
function InitializeSensorNetworking() {
  unset NEED_NETWORKING_RESTART

  if [[ ! -f /etc/network/interfaces.d/sensor ]]; then
    # /etc/network/interfaces.d/sensor can be further configured by the system admin via configure-interfaces.py.
    echo "" >> /etc/network/interfaces
    echo "# sensor interfaces should be configured in \"/etc/network/interfaces.d/sensor\"" >> /etc/network/interfaces
    for IFACE_NAME in "${!IFACES[@]}"; do
      echo "auto $IFACE_NAME" >> /etc/network/interfaces.d/sensor
      echo "allow-hotplug $IFACE_NAME" >> /etc/network/interfaces.d/sensor
      echo "iface $IFACE_NAME inet manual" >> /etc/network/interfaces.d/sensor
      echo "  pre-up ip link set dev \$IFACE up" >> /etc/network/interfaces.d/sensor
      echo "  post-down ip link set dev \$IFACE down" >> /etc/network/interfaces.d/sensor
      echo "" >> /etc/network/interfaces.d/sensor
    done
    NEED_NETWORKING_RESTART=0
  fi

  if ! grep --quiet ^TimeoutStartSec=1min /etc/systemd/system/network-online.target.wants/networking.service; then
    # only wait 1 minute during boot for network interfaces to come up
    sed -i 's/^\(TimeoutStartSec\)=.*/\1=1min/' /etc/systemd/system/network-online.target.wants/networking.service
    NEED_NETWORKING_RESTART=0
  fi

  [[ -n $NEED_NETWORKING_RESTART ]] && systemctl restart networking
}

function InitializeAggregatorNetworking() {
  unset NEED_NETWORKING_RESTART

  # we're going to let wicd manage networking on the aggregator, so remove physical interfaces from /etc/network/interfaces
  NET_IFACES_LINES=$(wc -l /etc/network/interfaces | awk '{print $1}')
  if [ $NET_IFACES_LINES -gt 4 ] ; then
    echo -e "source /etc/network/interfaces.d/*\n\nauto lo\niface lo inet loopback" > /etc/network/interfaces
    NEED_NETWORKING_RESTART=0
  fi

  if ! grep --quiet ^TimeoutStartSec=1min /etc/systemd/system/network-online.target.wants/networking.service; then
    # only wait 1 minute during boot for network interfaces to come up
    sed -i 's/^\(TimeoutStartSec\)=.*/\1=1min/' /etc/systemd/system/network-online.target.wants/networking.service
    NEED_NETWORKING_RESTART=0
  fi

  [[ -n $NEED_NETWORKING_RESTART ]] && systemctl restart networking
}

# fix some permissions to make sure things belong to the right person
function FixPermissions() {
  if [ -n "$1" ]; then
    USER_TO_FIX="$1"
    [ -d /home/"$USER_TO_FIX" ] && find /home/"$USER_TO_FIX" \( -type d -o -type f \) -exec chmod o-rwx "{}" \;
    [ -d /home/"$USER_TO_FIX" ] && find /home/"$USER_TO_FIX" -type f -name ".*" -exec chmod g-wx "{}" \;
    if [ ! -f /etc/cron.allow ] || ! grep -q "$USER_TO_FIX" /etc/cron.allow; then
      echo "$USER_TO_FIX" >> /etc/cron.allow
    fi
    if [ ! -f /etc/at.allow ] || ! grep -q "$USER_TO_FIX" /etc/at.allow; then
      echo "$USER_TO_FIX" >> /etc/at.allow
    fi
    chmod 644 /etc/cron.allow /etc/at.allow
  fi
}

# block some call-homes
function BadTelemetry() {
  if ! grep -q google /etc/hosts; then
    echo >> /etc/hosts
    echo '127.0.0.1 _googlecast._tcp.local' >> /etc/hosts
    echo '127.0.0.1 accounts.google.com' >> /etc/hosts
    echo '127.0.0.1 clients.l.google.com' >> /etc/hosts
    echo '127.0.0.1 fonts.googleapis.com' >> /etc/hosts
    echo '127.0.0.1 safebrowsing-cache.google.com' >> /etc/hosts
    echo '127.0.0.1 safebrowsing.clients.google.com' >> /etc/hosts
    echo '127.0.0.1 update.googleapis.com' >> /etc/hosts
    echo '127.0.0.1 www.google-analytics.com' >> /etc/hosts
    echo '127.0.0.1 www.gstatic.com' >> /etc/hosts
    echo '127.0.0.1 connectivitycheck.gstatic.com' >> /etc/hosts
    echo '127.0.0.1 incoming.telemetry.mozilla.org' >> /etc/hosts
    echo '127.0.0.1 detectportal.firefox.com' >> /etc/hosts
    echo '127.0.0.1 prod.detectportal.prod.cloudops.mozgcp.net' >> /etc/hosts
    echo '127.0.0.1 detectportal.prod.mozaws.net' >> /etc/hosts
  fi
}
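
These helpers are sourced and composed by the per-device init scripts (as agg-init.sh does
above); a plausible sensor-side sketch, assuming the same sourcing convention:

    . "$SCRIPT_PATH"/common-init.sh
    PopulateInterfaces                             # fill the IFACES associative array
    InitializeSensorNetworking                     # write /etc/network/interfaces.d/sensor
    CAPTURE_IFACE="$(DetermineCaptureInterface)"   # fastest non-virtual interface wins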

Vagrant/resources/malcolm/shared/bin/configure-capture.py  (new executable file, 907 lines)
@@ -0,0 +1,907 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved.

# script for configuring sensor capture and forwarding parameters

import locale
import os
import re
import shutil
import sys
import fileinput
from collections import defaultdict
from dialog import Dialog
from zeek_carve_utils import *
from sensorcommon import *

class Constants:
  CONFIG_CAP = 'Capture Configuration'

  DEV_IDENTIFIER_FILE = '/etc/installer'
  DEV_UNKNOWN = 'unknown'
  DEV_AGGREGATOR = 'aggregator'
  DEV_SENSOR = 'sensor'
  DEV_VALID = {DEV_AGGREGATOR, DEV_SENSOR}
  MSG_ERR_DEV_INVALID = f'Could not determine installation type (not one of {DEV_VALID})'
  MSG_ERR_DEV_INCORRECT = 'This tool is not suitable for configuring {}s'

  SENSOR_CAPTURE_CONFIG = '/opt/sensor/sensor_ctl/control_vars.conf'

  PCAP_CAPTURE_AUTOSTART_ENTRIES = {'AUTOSTART_ARKIME', 'AUTOSTART_NETSNIFF', 'AUTOSTART_TCPDUMP'}

  ZEEK_FILE_CARVING_NONE = 'none'
  ZEEK_FILE_CARVING_ALL = 'all'
  ZEEK_FILE_CARVING_KNOWN = 'known'
  ZEEK_FILE_CARVING_MAPPED = 'mapped'
  ZEEK_FILE_CARVING_MAPPED_MINUS_TEXT = 'mapped (except common plain text files)'
  ZEEK_FILE_CARVING_INTERESTING = 'interesting'
  ZEEK_FILE_CARVING_CUSTOM = 'custom'
  ZEEK_FILE_CARVING_CUSTOM_MIME = 'custom (mime-sorted)'
  ZEEK_FILE_CARVING_CUSTOM_EXT = 'custom (extension-sorted)'
  ZEEK_FILE_CARVING_DEFAULTS = '/opt/zeek/share/zeek/site/extractor_params.zeek'
  ZEEK_FILE_CARVING_OVERRIDE_FILE = '/opt/sensor/sensor_ctl/extractor_override.zeek'
  ZEEK_FILE_CARVING_OVERRIDE_INTERESTING_FILE = '/opt/sensor/sensor_ctl/extractor_override.interesting.zeek'
  ZEEK_FILE_CARVING_OVERRIDE_FILE_MAP_NAME = 'extractor_mime_to_ext_map'
  ZEEK_FILE_CARVING_PLAIN_TEXT_MIMES = {
    "application/json",
    "application/x-x509-ca-cert",
    "application/xml",
    "text/plain",
    "text/xml"
  }

  FILEBEAT='filebeat'
  METRICBEAT='metricbeat'
  AUDITBEAT='auditbeat'
  HEATBEAT='heatbeat' # protologbeat to log temperature and other misc. stuff
  SYSLOGBEAT='filebeat-syslog' # another filebeat instance for syslog
  ARKIMECAP='moloch-capture'

  BEAT_DIR = {
    FILEBEAT : f'/opt/sensor/sensor_ctl/{FILEBEAT}',
    METRICBEAT : f'/opt/sensor/sensor_ctl/{METRICBEAT}',
    AUDITBEAT : f'/opt/sensor/sensor_ctl/{AUDITBEAT}',
    SYSLOGBEAT : f'/opt/sensor/sensor_ctl/{SYSLOGBEAT}',
    HEATBEAT : f'/opt/sensor/sensor_ctl/{HEATBEAT}'
  }

  BEAT_KIBANA_DIR = {
    FILEBEAT : f'/usr/share/{FILEBEAT}/kibana',
    METRICBEAT : f'/usr/share/{METRICBEAT}/kibana',
    AUDITBEAT : f'/usr/share/{AUDITBEAT}/kibana',
    SYSLOGBEAT : f'/usr/share/{FILEBEAT}/kibana',
    HEATBEAT : f'/usr/share/protologbeat/kibana/'
  }

  BEAT_CMD = {
    FILEBEAT : f'{FILEBEAT} --path.home "{BEAT_DIR[FILEBEAT]}" --path.config "{BEAT_DIR[FILEBEAT]}" --path.data "{BEAT_DIR[FILEBEAT]}/data" --path.logs "{BEAT_DIR[FILEBEAT]}/logs" -c "{BEAT_DIR[FILEBEAT]}/{FILEBEAT}.yml"',
    METRICBEAT : f'{METRICBEAT} --path.home "{BEAT_DIR[METRICBEAT]}" --path.config "{BEAT_DIR[METRICBEAT]}" --path.data "{BEAT_DIR[METRICBEAT]}/data" --path.logs "{BEAT_DIR[METRICBEAT]}/logs" -c "{BEAT_DIR[METRICBEAT]}/{METRICBEAT}.yml"',
    AUDITBEAT : f'{AUDITBEAT} --path.home "{BEAT_DIR[AUDITBEAT]}" --path.config "{BEAT_DIR[AUDITBEAT]}" --path.data "{BEAT_DIR[AUDITBEAT]}/data" --path.logs "{BEAT_DIR[AUDITBEAT]}/logs" -c "{BEAT_DIR[AUDITBEAT]}/{AUDITBEAT}.yml"',
    SYSLOGBEAT : f'{FILEBEAT} --path.home "{BEAT_DIR[SYSLOGBEAT]}" --path.config "{BEAT_DIR[SYSLOGBEAT]}" --path.data "{BEAT_DIR[SYSLOGBEAT]}/data" --path.logs "{BEAT_DIR[SYSLOGBEAT]}/logs" -c "{BEAT_DIR[SYSLOGBEAT]}/{SYSLOGBEAT}.yml"',
    HEATBEAT : f'protologbeat --path.home "{BEAT_DIR[HEATBEAT]}" --path.config "{BEAT_DIR[HEATBEAT]}" --path.data "{BEAT_DIR[HEATBEAT]}/data" --path.logs "{BEAT_DIR[HEATBEAT]}/logs" -c "{BEAT_DIR[HEATBEAT]}/protologbeat.yml"'
  }

  # specific to beats forwarded to logstash (e.g., filebeat)
  BEAT_LS_HOST = 'BEAT_LS_HOST'
  BEAT_LS_PORT = 'BEAT_LS_PORT'
  BEAT_LS_SSL = 'BEAT_LS_SSL'
  BEAT_LS_SSL_CA_CRT = 'BEAT_LS_SSL_CA_CRT'
  BEAT_LS_SSL_CLIENT_CRT = 'BEAT_LS_SSL_CLIENT_CRT'
  BEAT_LS_SSL_CLIENT_KEY = 'BEAT_LS_SSL_CLIENT_KEY'
  BEAT_LS_SSL_VERIFY = 'BEAT_LS_SSL_VERIFY'

  # specific to beats forwarded to elasticsearch (e.g., metricbeat, auditbeat, filebeat-syslog)
  BEAT_ES_HOST = "BEAT_ES_HOST"
  BEAT_ES_PORT = "BEAT_ES_PORT"
  BEAT_ES_PROTOCOL = "BEAT_ES_PROTOCOL"
  BEAT_ES_SSL_VERIFY = "BEAT_ES_SSL_VERIFY"
  BEAT_HTTP_PASSWORD = "BEAT_HTTP_PASSWORD"
  BEAT_HTTP_USERNAME = "BEAT_HTTP_USERNAME"
  BEAT_KIBANA_DASHBOARDS_ENABLED = "BEAT_KIBANA_DASHBOARDS_ENABLED"
  BEAT_KIBANA_DASHBOARDS_PATH = "BEAT_KIBANA_DASHBOARDS_PATH"
  BEAT_KIBANA_HOST = "BEAT_KIBANA_HOST"
  BEAT_KIBANA_PORT = "BEAT_KIBANA_PORT"
  BEAT_KIBANA_PROTOCOL = "BEAT_KIBANA_PROTOCOL"
  BEAT_KIBANA_SSL_VERIFY = "BEAT_KIBANA_SSL_VERIFY"

  # specific to filebeat
  BEAT_LOG_PATH_SUBDIR = os.path.join('logs', 'current')
  BEAT_LOG_PATTERN_KEY = 'BEAT_LOG_PATTERN'
  BEAT_LOG_PATTERN_VAL = '*.log'

  # specific to metricbeat
  BEAT_INTERVAL = "BEAT_INTERVAL"

  # specific to moloch
  ARKIME_PACKET_ACL = "ARKIME_PACKET_ACL"

  MSG_CONFIG_MODE = 'Configuration Mode'
  MSG_CONFIG_MODE_CAPTURE = 'Configure Capture'
  MSG_CONFIG_MODE_FORWARD = 'Configure Forwarding'
  MSG_CONFIG_MODE_AUTOSTART = 'Configure Autostart Services'
  MSG_CONFIG_GENERIC = 'Configure {}'
  MSG_CONFIG_ARKIME = (f'{ARKIMECAP}', f'Configure Arkime session forwarding via {ARKIMECAP}')
  MSG_CONFIG_FILEBEAT = (f'{FILEBEAT}', f'Configure Zeek log forwarding via {FILEBEAT}')
  MSG_CONFIG_METRICBEAT = (f'{METRICBEAT}', f'Configure resource metrics forwarding via {METRICBEAT}')
  MSG_CONFIG_AUDITBEAT = (f'{AUDITBEAT}', f'Configure audit log forwarding via {AUDITBEAT}')
  MSG_CONFIG_SYSLOGBEAT = (f'{SYSLOGBEAT}', f'Configure syslog forwarding via {FILEBEAT}')
  MSG_CONFIG_HEATBEAT = (f'{HEATBEAT}', f'Configure hardware metrics (temperature, etc.) forwarding via protologbeat')
  MSG_OVERWRITE_CONFIG = '{} is already configured, overwrite current settings?'
  MSG_IDENTIFY_NICS = 'Do you need help identifying network interfaces?'
  MSG_BACKGROUND_TITLE = 'Sensor Configuration'
  MSG_CONFIG_AUTOSTARTS = 'Specify autostart processes'
  MSG_CONFIG_ZEEK_CARVED_SCANNERS = 'Specify scanners for Zeek-carved files'
  MSG_CONFIG_ZEEK_CARVING = 'Specify Zeek file carving mode'
  MSG_CONFIG_ZEEK_CARVING_MIMES = 'Specify file types to carve'
  MSG_CONFIG_CARVED_FILE_PRESERVATION = 'Specify which carved files to preserve'
  MSG_CONFIG_CAP_CONFIRM = 'Sensor will capture traffic with the following parameters:\n\n{}'
  MSG_CONFIG_AUTOSTART_CONFIRM = 'Sensor will autostart the following services:\n\n{}'
  MSG_CONFIG_FORWARDING_CONFIRM = '{} will forward with the following parameters:\n\n{}'
  MSG_CONFIG_CAP_PATHS = 'Provide paths for captured PCAPs and Zeek logs'
  MSG_CONFIG_CAPTURE_SUCCESS = 'Capture interface set to {} in {}.\n\nReboot to apply changes.'
  MSG_CONFIG_AUTOSTART_SUCCESS = 'Autostart services configured.\n\nReboot to apply changes.'
  MSG_CONFIG_FORWARDING_SUCCESS = '{} forwarding configured:\n\n{}\n\nRestart forwarding services or reboot to apply changes.'
  MSG_CONFIG_ARKIME_PCAP_ACL = 'Specify IP addresses for PCAP retrieval ACL (one per line)'
  MSG_ERR_PLEBE_REQUIRED = 'this utility should be run as a non-privileged user'
  MSG_ERROR_DIR_NOT_FOUND = 'One or more of the paths specified does not exist'
  MSG_ERROR_FILE_NOT_FOUND = 'One or more of the files specified does not exist'
  MSG_ERROR_BAD_HOST = 'Invalid host or port'
  MSG_ERROR_FWD_DIR_NOT_FOUND = 'The path {} does not exist, {} cannot be configured'
  MSG_ERROR_MISSING_CAP_CONFIG = f'Capture configuration file {SENSOR_CAPTURE_CONFIG} does not exist'
  MSG_ERROR_KEYSTORE = 'There was an error creating the keystore for {}:\n\n{}'
  MSG_ERROR_FILTER_VALIDATION = "Warning: capture filter failed validation ({}). Adjust filter, or resubmit unchanged to ignore warning."
  MSG_MESSAGE_ERROR = 'Error: {}\n\nPlease try again.'
  MSG_CANCEL_ERROR = 'Operation cancelled, goodbye!'
  MSG_EMPTY_CONFIG_ERROR = "No configuration values were supplied"
  MSG_SELECT_INTERFACE = 'Select capture interface(s)'
  MSG_SELECT_BLINK_INTERFACE = 'Select capture interface to identify'
  MSG_BLINK_INTERFACE = '{} will blink for {} seconds'
  MSG_WELCOME_TITLE = 'Welcome to the sensor capture and forwarding configuration utility!'
  MSG_TESTING_CONNECTION = 'Testing {} connection...'
  MSG_TESTING_CONNECTION_SUCCESS = '{} connection succeeded! ({} {})'
  MSG_TESTING_CONNECTION_FAILURE = "{} connection error: {} {}:\n\n {}"
  MSG_TESTING_CONNECTION_FAILURE_LOGSTASH = "{} connection error: could not connect to {}:{}"
  MSG_WARNING_MULTIPLE_PCAP = "Warning: multiple PCAP processes are enabled ({}). Using a single PCAP process is recommended."

# the main dialog window used for the duration of this tool
d = Dialog(dialog='dialog', autowidgetsize=True)
d.set_background_title(Constants.MSG_BACKGROUND_TITLE)

###################################################################################################
def mime_to_extension_mappings(mapfile):
  # get all mime-to-extension mappings from our mapping zeek file into a dictionary
  mime_maps = defaultdict(str)

  if os.path.exists(mapfile):
    maps_list = []
    with open(mapfile) as f:
      maps_list = [x.replace(' ', '') for x in re.findall(r'(\[\s*"[A-Za-z0-9/\.\+_-]+"\s*\]\s*=\s*"[A-Za-z0-9\.\+_-]+")', f.read(), re.MULTILINE)]
    mime_map_re = re.compile(r'\[\s*"([A-Za-z0-9/\.\+_-]+)"\s*\]\s*=\s*"([A-Za-z0-9\.\+_-]+)"')
    for mime_map in maps_list:
      match = mime_map_re.search(mime_map)
      if match:
        mime_maps[match.group(1)] = match.group(2)

  return mime_maps

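# A worked example for mime_to_extension_mappings() above (hypothetical input): an entry in
# extractor_params.zeek such as
#   ["application/pdf"] = "pdf",
# is matched by the regular expressions and yields {"application/pdf": "pdf"} in the returned dict.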
###################################################################################################
def input_elasticsearch_connection_info(forwarder,
                                        default_es_host=None,
                                        default_es_port=None,
                                        default_kibana_host=None,
                                        default_kibana_port=None,
                                        default_username=None,
                                        default_password=None):

  return_dict = defaultdict(str)

  # Elasticsearch configuration
  # elasticsearch protocol and SSL verification mode
  elastic_protocol = "http"
  elastic_ssl_verify = "none"
  if (d.yesno("Elasticsearch connection protocol", yes_label="HTTPS", no_label="HTTP") == Dialog.OK):
    elastic_protocol = "https"
    if (d.yesno("Elasticsearch SSL verification", yes_label="None", no_label="Full") != Dialog.OK):
      elastic_ssl_verify = "full"
  return_dict[Constants.BEAT_ES_PROTOCOL] = elastic_protocol
  return_dict[Constants.BEAT_ES_SSL_VERIFY] = elastic_ssl_verify

  while True:
    # host/port for Elasticsearch
    code, values = d.form(Constants.MSG_CONFIG_GENERIC.format(forwarder), [
                          ('Elasticsearch Host', 1, 1, default_es_host or "", 1,  25, 30, 255),
                          ('Elasticsearch Port', 2, 1, default_es_port or "9200", 2, 25, 6, 5)
                          ])
    values = [x.strip() for x in values]

    if (code == Dialog.CANCEL) or (code == Dialog.ESC):
      raise CancelledError

    elif (len(values[0]) <= 0) or (len(values[1]) <= 0) or (not values[1].isnumeric()):
      code = d.msgbox(text=Constants.MSG_ERROR_BAD_HOST)

    else:
      return_dict[Constants.BEAT_ES_HOST] = values[0]
      return_dict[Constants.BEAT_ES_PORT] = values[1]
      break

  # Kibana configuration (if supported by forwarder)
  if (forwarder in Constants.BEAT_KIBANA_DIR.keys()) and (d.yesno(f"Configure {forwarder} Kibana connectivity?") == Dialog.OK):
    # kibana protocol and SSL verification mode
    kibana_protocol = "http"
    kibana_ssl_verify = "none"
    if (d.yesno("Kibana connection protocol", yes_label="HTTPS", no_label="HTTP") == Dialog.OK):
      kibana_protocol = "https"
      if (d.yesno("Kibana SSL verification", yes_label="None", no_label="Full") != Dialog.OK):
        kibana_ssl_verify = "full"
    return_dict[Constants.BEAT_KIBANA_PROTOCOL] = kibana_protocol
    return_dict[Constants.BEAT_KIBANA_SSL_VERIFY] = kibana_ssl_verify

    while True:
      # host/port for Kibana
      code, values = d.form(Constants.MSG_CONFIG_GENERIC.format(forwarder), [
                            ('Kibana Host', 1, 1, default_kibana_host or "", 1,  20, 30, 255),
                            ('Kibana Port', 2, 1, default_kibana_port or "5601", 2, 20, 6, 5)
                            ])
      values = [x.strip() for x in values]

      if (code == Dialog.CANCEL) or (code == Dialog.ESC):
        raise CancelledError

      elif (len(values[0]) <= 0) or (len(values[1]) <= 0) or (not values[1].isnumeric()):
        code = d.msgbox(text=Constants.MSG_ERROR_BAD_HOST)

      else:
        return_dict[Constants.BEAT_KIBANA_HOST] = values[0]
        return_dict[Constants.BEAT_KIBANA_PORT] = values[1]
        break

    if (d.yesno(f"Configure {forwarder} Kibana dashboards?") == Dialog.OK):
      kibana_dashboards = "true"
    else:
      kibana_dashboards = "false"
    return_dict[Constants.BEAT_KIBANA_DASHBOARDS_ENABLED] = kibana_dashboards

    if kibana_dashboards == "true":
      while True:
        code, values = d.form(Constants.MSG_CONFIG_GENERIC.format(forwarder), [
                              ('Kibana Dashboards Path', 1, 1, Constants.BEAT_KIBANA_DIR[forwarder], 1, 30, 30, 255)
                              ])
        values = [x.strip() for x in values]

        if (code == Dialog.CANCEL) or (code == Dialog.ESC):
          raise CancelledError

        elif (len(values[0]) <= 0) or (not os.path.isdir(values[0])):
          code = d.msgbox(text=Constants.MSG_ERROR_DIR_NOT_FOUND)

        else:
          return_dict[Constants.BEAT_KIBANA_DASHBOARDS_PATH] = values[0]
          break

  server_display_name = "Elasticsearch/Kibana" if Constants.BEAT_KIBANA_HOST in return_dict.keys() else "Elasticsearch"

  # HTTP/HTTPS authentication
  code, http_username = d.inputbox(f"{server_display_name} HTTP/HTTPS server username", init=default_username)
  if (code == Dialog.CANCEL) or (code == Dialog.ESC):
    raise CancelledError
  return_dict[Constants.BEAT_HTTP_USERNAME] = http_username.strip()

  # make them enter the password twice
  while True:
    code, http_password = d.passwordbox(f"{server_display_name} HTTP/HTTPS server password", insecure=True, init=default_password)
    if (code == Dialog.CANCEL) or (code == Dialog.ESC):
      raise CancelledError

    code, http_password2 = d.passwordbox(f"{server_display_name} HTTP/HTTPS server password (again)", insecure=True, init=default_password if (http_password == default_password) else "")
    if (code == Dialog.CANCEL) or (code == Dialog.ESC):
      raise CancelledError

    if (http_password == http_password2):
      return_dict[Constants.BEAT_HTTP_PASSWORD] = http_password.strip()
      break
    else:
      code = d.msgbox(text=Constants.MSG_MESSAGE_ERROR.format("Passwords did not match"))

  # test Elasticsearch connection
  code = d.infobox(Constants.MSG_TESTING_CONNECTION.format("Elasticsearch"))
  retcode, message, output = test_connection(protocol=return_dict[Constants.BEAT_ES_PROTOCOL],
                                             host=return_dict[Constants.BEAT_ES_HOST],
                                             port=return_dict[Constants.BEAT_ES_PORT],
                                             username=return_dict[Constants.BEAT_HTTP_USERNAME] if (len(return_dict[Constants.BEAT_HTTP_USERNAME]) > 0) else None,
                                             password=return_dict[Constants.BEAT_HTTP_PASSWORD] if (len(return_dict[Constants.BEAT_HTTP_PASSWORD]) > 0) else None,
                                             ssl_verify=return_dict[Constants.BEAT_ES_SSL_VERIFY])
  if (retcode == 200):
    code = d.msgbox(text=Constants.MSG_TESTING_CONNECTION_SUCCESS.format("Elasticsearch", retcode, message))
  else:
    code = d.yesno(text=Constants.MSG_TESTING_CONNECTION_FAILURE.format("Elasticsearch", retcode, message, "\n".join(output)),
                   yes_label="Ignore Error", no_label="Start Over")
    if code != Dialog.OK:
      raise CancelledError

  # test Kibana connection
  if Constants.BEAT_KIBANA_HOST in return_dict.keys():
    code = d.infobox(Constants.MSG_TESTING_CONNECTION.format("Kibana"))
    retcode, message, output = test_connection(protocol=return_dict[Constants.BEAT_KIBANA_PROTOCOL],
                                               host=return_dict[Constants.BEAT_KIBANA_HOST],
                                               port=return_dict[Constants.BEAT_KIBANA_PORT],
                                               uri="api/status",
                                               username=return_dict[Constants.BEAT_HTTP_USERNAME] if (len(return_dict[Constants.BEAT_HTTP_USERNAME]) > 0) else None,
                                               password=return_dict[Constants.BEAT_HTTP_PASSWORD] if (len(return_dict[Constants.BEAT_HTTP_PASSWORD]) > 0) else None,
                                               ssl_verify=return_dict[Constants.BEAT_KIBANA_SSL_VERIFY])
    if (retcode == 200):
      code = d.msgbox(text=Constants.MSG_TESTING_CONNECTION_SUCCESS.format("Kibana", retcode, message))
    else:
      code = d.yesno(text=Constants.MSG_TESTING_CONNECTION_FAILURE.format("Kibana", retcode, message, "\n".join(output)),
                     yes_label="Ignore Error", no_label="Start Over")
      if code != Dialog.OK:
        raise CancelledError

  return return_dict

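# Note on the return value of input_elasticsearch_connection_info() above: the defaultdict maps
# the Constants.BEAT_* keys gathered interactively to strings, e.g. (hypothetical values):
#   {BEAT_ES_PROTOCOL: "https", BEAT_ES_HOST: "10.0.0.10", BEAT_ES_PORT: "9200", ...}
# which the caller can then apply to a forwarder's configuration.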
| ################################################################################################### | ||||
| ################################################################################################### | ||||
| def main(): | ||||
|   locale.setlocale(locale.LC_ALL, '') | ||||
|  | ||||
|   # make sure we are NOT being run as root | ||||
|   if os.getuid() == 0: | ||||
|     print(Constants.MSG_ERR_PLEBE_REQUIRED) | ||||
|     sys.exit(1) | ||||
|  | ||||
|   # what are we (sensor vs. aggregator) | ||||
|   installation = Constants.DEV_UNKNOWN | ||||
|   modeChoices = [] | ||||
|   try: | ||||
|     with open(Constants.DEV_IDENTIFIER_FILE, 'r') as f: | ||||
|       installation = f.readline().strip() | ||||
|   except: | ||||
|     pass | ||||
|   if (installation not in Constants.DEV_VALID): | ||||
|     print(Constants.MSG_ERR_DEV_INVALID) | ||||
|     sys.exit(1) | ||||
|   elif (installation == Constants.DEV_SENSOR): | ||||
|     modeChoices = [(Constants.MSG_CONFIG_MODE_CAPTURE, ""), (Constants.MSG_CONFIG_MODE_FORWARD, ""), (Constants.MSG_CONFIG_MODE_AUTOSTART, "")] | ||||
|   else: | ||||
|     print(Constants.MSG_ERR_DEV_INCORRECT.format(installation)) | ||||
|     sys.exit(1) | ||||
|  | ||||
|   start_dir = os.getcwd() | ||||
|   quit_flag = False | ||||
|  | ||||
|   # store previously-entered elasticsearch values in case they are going through the loop | ||||
|   # mulitple times to prevent them from having to enter them over and over | ||||
|   previous_config_values = defaultdict(str) | ||||
|  | ||||
|   while not quit_flag: | ||||
|     try: | ||||
|       os.chdir(start_dir) | ||||
|  | ||||
|       if not os.path.isfile(Constants.SENSOR_CAPTURE_CONFIG): | ||||
|         # SENSOR_CAPTURE_CONFIG file doesn't exist, can't continue | ||||
|         raise Exception(Constants.MSG_ERROR_MISSING_CAP_CONFIG) | ||||
|  | ||||
|       # read existing configuration from SENSOR_CAPTURE_CONFIG into a dictionary file (not written back out as such, just used | ||||
|       # as a basis for default values) | ||||
|       capture_config_dict = defaultdict(str) | ||||
|       with open(Constants.SENSOR_CAPTURE_CONFIG) as file: | ||||
|         for line in file: | ||||
|           if len(line.strip()) > 0: | ||||
|             name, var = remove_prefix(line, "export").partition("=")[::2] | ||||
|             capture_config_dict[name.strip()] = var.strip().strip("'").strip('"') | ||||
|       if (Constants.BEAT_ES_HOST not in previous_config_values.keys()) and ("ES_HOST" in capture_config_dict.keys()): | ||||
|         previous_config_values[Constants.BEAT_ES_HOST] = capture_config_dict["ES_HOST"] | ||||
|         previous_config_values[Constants.BEAT_KIBANA_HOST] = capture_config_dict["ES_HOST"] | ||||
|       if (Constants.BEAT_ES_PORT not in previous_config_values.keys()) and ("ES_PORT" in capture_config_dict.keys()): | ||||
|         previous_config_values[Constants.BEAT_ES_PORT] = capture_config_dict["ES_PORT"] | ||||
|       if (Constants.BEAT_HTTP_USERNAME not in previous_config_values.keys()) and ("ES_USERNAME" in capture_config_dict.keys()): | ||||
|         previous_config_values[Constants.BEAT_HTTP_USERNAME] = capture_config_dict["ES_USERNAME"] | ||||
|       if (Constants.ARKIME_PACKET_ACL not in previous_config_values.keys()) and ("ARKIME_PACKET_ACL" in capture_config_dict.keys()): | ||||
|         previous_config_values[Constants.ARKIME_PACKET_ACL] = capture_config_dict[Constants.ARKIME_PACKET_ACL] | ||||
|  | ||||
|       code = d.yesno(Constants.MSG_WELCOME_TITLE, yes_label="Continue", no_label="Quit") | ||||
|       if (code == Dialog.CANCEL or code == Dialog.ESC): | ||||
|         quit_flag = True | ||||
|         raise CancelledError | ||||
|  | ||||
|       code, mode = d.menu(Constants.MSG_CONFIG_MODE, choices=modeChoices) | ||||
|       if code != Dialog.OK: | ||||
|         quit_flag = True | ||||
|         raise CancelledError | ||||
|  | ||||
|       if mode == Constants.MSG_CONFIG_MODE_AUTOSTART: | ||||
|         ##### sensor autostart services configuration ####################################################################################### | ||||
|  | ||||
|         while True: | ||||
|           # select processes for autostart (except for the file scan ones, handle those with the file scanning stuff) | ||||
|           autostart_choices = [] | ||||
|           for k, v in sorted(capture_config_dict.items()): | ||||
|             if k.startswith("AUTOSTART_"): | ||||
|               autostart_choices.append((k, '', v.lower() == "true")) | ||||
|           code, autostart_tags = d.checklist(Constants.MSG_CONFIG_AUTOSTARTS, choices=autostart_choices) | ||||
|           if (code == Dialog.CANCEL or code == Dialog.ESC): | ||||
|             raise CancelledError | ||||
|  | ||||
|           for tag in [x[0] for x in autostart_choices]: | ||||
|             capture_config_dict[tag] = "false" | ||||
|           for tag in autostart_tags: | ||||
|             capture_config_dict[tag] = "true" | ||||
|  | ||||
|           # warn them if we're doing mulitple PCAP capture processes | ||||
|           pcap_procs_enabled = [x for x in autostart_tags if x in Constants.PCAP_CAPTURE_AUTOSTART_ENTRIES] | ||||
|           if ((len(pcap_procs_enabled) <= 1) or | ||||
|               (d.yesno(text=Constants.MSG_WARNING_MULTIPLE_PCAP.format(", ".join(pcap_procs_enabled)), | ||||
|                        yes_label="Continue Anyway", no_label="Adjust Selections") == Dialog.OK)): | ||||
|             break | ||||
|  | ||||
|         # get confirmation from user that we really want to do this | ||||
|         code = d.yesno(Constants.MSG_CONFIG_AUTOSTART_CONFIRM.format("\n".join(sorted([f"{k}={v}" for k, v in capture_config_dict.items() if "AUTOSTART" in k]))), | ||||
|                        yes_label="OK", no_label="Cancel") | ||||
|         if code == Dialog.OK: | ||||
|  | ||||
|           # modify specified values in-place in SENSOR_CAPTURE_CONFIG file | ||||
|           autostart_re = re.compile(r"(\bAUTOSTART_\w+)\s*=\s*.+?$") | ||||
|           with fileinput.FileInput(Constants.SENSOR_CAPTURE_CONFIG, inplace=True, backup='.bak') as file: | ||||
|             for line in file: | ||||
|               line = line.rstrip("\n") | ||||
|               autostart_match = autostart_re.search(line) | ||||
|               if autostart_match is not None: | ||||
|                 print(autostart_re.sub(r"\1=%s" % capture_config_dict[autostart_match.group(1)], line)) | ||||
|               else: | ||||
|                 print(line) | ||||
|  | ||||
|           # hooray | ||||
|           code = d.msgbox(text=Constants.MSG_CONFIG_AUTOSTART_SUCCESS) | ||||
|  | ||||
|       elif mode == Constants.MSG_CONFIG_MODE_CAPTURE: | ||||
|         ##### sensor capture configuration ################################################################################################## | ||||
|  | ||||
|         # determine a list of available (non-virtual) adapters | ||||
|         available_adapters = get_available_adapters() | ||||
|         # previously used capture interfaces | ||||
|         preselected_ifaces = set([x.strip() for x in capture_config_dict["CAPTURE_INTERFACE"].split(',')]) | ||||
|  | ||||
|         while (len(available_adapters) > 0) and (d.yesno(Constants.MSG_IDENTIFY_NICS) == Dialog.OK): | ||||
|           code, blinky_iface = d.radiolist(Constants.MSG_SELECT_BLINK_INTERFACE, choices=[(adapter.name, adapter.description, False) for adapter in available_adapters]) | ||||
|           if (code == Dialog.OK) and (len(blinky_iface) > 0): | ||||
|             if (d.yesno(Constants.MSG_BLINK_INTERFACE.format(blinky_iface, NIC_BLINK_SECONDS), yes_label="Ready", no_label="Cancel") == Dialog.OK): | ||||
|               identify_adapter(adapter=blinky_iface, duration=NIC_BLINK_SECONDS, background=True) | ||||
|               code = d.pause(f"Identifying {blinky_iface}", seconds=NIC_BLINK_SECONDS, width=60, height=15) | ||||
|           elif (code != Dialog.OK): | ||||
|             break | ||||
|  | ||||
|         # user selects interface(s) for capture | ||||
|         code, tag = d.checklist(Constants.MSG_SELECT_INTERFACE, choices=[(adapter.name, adapter.description, adapter.name in preselected_ifaces) for adapter in available_adapters]) | ||||
|         if code != Dialog.OK: | ||||
|           raise CancelledError | ||||
|         selected_ifaces = tag | ||||
|  | ||||
|         if (len(selected_ifaces) > 0): | ||||
|           # user specifies capture filter (and we validate it with tcpdump) | ||||
|           prev_capture_filter = capture_config_dict["CAPTURE_FILTER"] | ||||
|           while True: | ||||
|             code, capture_filter = d.inputbox("PCAP capture filter (tcpdump-like filter expression; leave blank to capture all traffic)", init=prev_capture_filter) | ||||
|             if (code == Dialog.CANCEL) or (code == Dialog.ESC): | ||||
|               raise CancelledError | ||||
|             capture_filter = capture_filter.strip() | ||||
|             if (len(capture_filter) > 0): | ||||
|               # test out the capture filter to see if there's a syntax error | ||||
|               ecode, filter_test_results = run_process(f'tcpdump -i {selected_ifaces[0]} -d "{capture_filter}"', stdout=False, stderr=True) | ||||
|             else: | ||||
|               # nothing to validate | ||||
|               ecode = 0 | ||||
|               filter_test_results = [""] | ||||
|             if (prev_capture_filter == capture_filter) or ((ecode == 0) and | ||||
|                                                            (not any(x.lower().startswith("tcpdump: warning") for x in filter_test_results)) and | ||||
|                                                            (not any(x.lower().startswith("tcpdump: error") for x in filter_test_results)) and | ||||
|                                                            (not any("syntax error" in x.lower() for x in filter_test_results))): | ||||
|               break | ||||
|             else: | ||||
|               code = d.msgbox(text=Constants.MSG_ERROR_FILTER_VALIDATION.format(" ".join([x.strip() for x in filter_test_results]))) | ||||
|             prev_capture_filter = capture_filter | ||||
|  | ||||
|         # regular expressions for selected name=value pairs to update in configuration file | ||||
|         capture_interface_re = re.compile(r"(\bCAPTURE_INTERFACE)\s*=\s*.+?$") | ||||
|         capture_filter_re = re.compile(r"(\bCAPTURE_FILTER)\s*=\s*.*?$") | ||||
|         pcap_path_re = re.compile(r"(\bPCAP_PATH)\s*=\s*.+?$") | ||||
|         zeek_path_re = re.compile(r"(\bZEEK_LOG_PATH)\s*=\s*.+?$") | ||||
|         zeek_carve_re = re.compile(r"(\bZEEK_EXTRACTOR_MODE)\s*=\s*.+?$") | ||||
|         zeek_file_preservation_re = re.compile(r"(\bEXTRACTED_FILE_PRESERVATION)\s*=\s*.+?$") | ||||
|         zeek_carve_override_re = re.compile(r"(\bZEEK_EXTRACTOR_OVERRIDE_FILE)\s*=\s*.*?$") | ||||
|         zeek_file_watch_re = re.compile(r"(\bZEEK_FILE_WATCH)\s*=\s*.+?$") | ||||
|         zeek_file_scanner_re = re.compile(r"(\bZEEK_FILE_SCAN_\w+)\s*=\s*.+?$") | ||||
|  | ||||
|         # get paths for captured PCAP and Zeek files | ||||
|         while True: | ||||
|           code, path_values = d.form(Constants.MSG_CONFIG_CAP_PATHS, [ | ||||
|                                 ('PCAP Path',    1, 1, capture_config_dict.get("PCAP_PATH", ""),  1, 20, 30, 255), | ||||
|                                 ('Zeek Log Path', 2, 1, capture_config_dict.get("ZEEK_LOG_PATH", ""), 2, 20, 30, 255), | ||||
|                                 ]) | ||||
|           path_values = [x.strip() for x in path_values] | ||||
|  | ||||
|           if (code == Dialog.CANCEL or code == Dialog.ESC): | ||||
|             raise CancelledError | ||||
|  | ||||
|           # paths must be specified, and must already exist | ||||
|           if ((len(path_values[0]) > 0) and os.path.isdir(path_values[0]) and | ||||
|               (len(path_values[1]) > 0) and os.path.isdir(path_values[1])): | ||||
|             break | ||||
|           else: | ||||
|             code = d.msgbox(text=Constants.MSG_ERROR_DIR_NOT_FOUND) | ||||
|  | ||||
|         # configure file carving | ||||
|         code, zeek_carve_mode = d.radiolist(Constants.MSG_CONFIG_ZEEK_CARVING, choices=[(Constants.ZEEK_FILE_CARVING_NONE, | ||||
|                                                                                         'Disable file carving', | ||||
|                                                                                         (capture_config_dict["ZEEK_EXTRACTOR_MODE"] == Constants.ZEEK_FILE_CARVING_NONE)), | ||||
|                                                                                       (Constants.ZEEK_FILE_CARVING_MAPPED, | ||||
|                                                                                         'Carve files with recognized mime types', | ||||
|                                                                                         ((capture_config_dict["ZEEK_EXTRACTOR_MODE"] == Constants.ZEEK_FILE_CARVING_MAPPED) and (len(capture_config_dict["ZEEK_EXTRACTOR_OVERRIDE_FILE"]) == 0))), | ||||
|                                                                                       (Constants.ZEEK_FILE_CARVING_MAPPED_MINUS_TEXT, | ||||
|                                                                                         'Carve files with recognized mime types (except common plain text files)', False), | ||||
|                                                                                       (Constants.ZEEK_FILE_CARVING_KNOWN, | ||||
|                                                                                         'Carve files for which any mime type can be determined', | ||||
|                                                                                         (capture_config_dict["ZEEK_EXTRACTOR_MODE"] == Constants.ZEEK_FILE_CARVING_KNOWN)), | ||||
|                                                                                       (Constants.ZEEK_FILE_CARVING_INTERESTING, | ||||
|                                                                                         'Carve files with mime types of common attack vectors', False), | ||||
|                                                                                       (Constants.ZEEK_FILE_CARVING_CUSTOM_MIME, | ||||
|                                                                                         'Use a custom selection of mime types (sorted by mime type)', | ||||
|                                                                                         ((capture_config_dict["ZEEK_EXTRACTOR_MODE"] == Constants.ZEEK_FILE_CARVING_MAPPED) and (len(capture_config_dict["ZEEK_EXTRACTOR_OVERRIDE_FILE"]) > 0))), | ||||
|                                                                                       (Constants.ZEEK_FILE_CARVING_CUSTOM_EXT, | ||||
|                                                                                         'Use a custom selection of mime types (sorted by file extension)', False), | ||||
|                                                                                       (Constants.ZEEK_FILE_CARVING_ALL, | ||||
|                                                                                         'Carve all files', | ||||
|                                                                                         (capture_config_dict["ZEEK_EXTRACTOR_MODE"] == Constants.ZEEK_FILE_CARVING_ALL))]) | ||||
|         if (code == Dialog.CANCEL or code == Dialog.ESC): | ||||
|           raise CancelledError | ||||
|  | ||||
|         mime_tags = [] | ||||
|         capture_config_dict["ZEEK_EXTRACTOR_OVERRIDE_FILE"] = "" | ||||
|         zeek_carved_file_preservation = PRESERVE_NONE | ||||
|  | ||||
|         if zeek_carve_mode.startswith(Constants.ZEEK_FILE_CARVING_CUSTOM) or zeek_carve_mode.startswith(Constants.ZEEK_FILE_CARVING_MAPPED_MINUS_TEXT): | ||||
|  | ||||
|           # get all known mime-to-extension mappings into a dictionary | ||||
|           all_mime_maps = mime_to_extension_mappings(Constants.ZEEK_FILE_CARVING_DEFAULTS) | ||||
|  | ||||
|           if (zeek_carve_mode == Constants.ZEEK_FILE_CARVING_MAPPED_MINUS_TEXT): | ||||
|             # all mime types minus common text mime types | ||||
|             mime_tags.extend([mime for mime in all_mime_maps.keys() if mime not in Constants.ZEEK_FILE_CARVING_PLAIN_TEXT_MIMES]) | ||||
|  | ||||
|           else: | ||||
|             # select mimes to carve (pre-selecting items previously in the override file) | ||||
|             if (zeek_carve_mode == Constants.ZEEK_FILE_CARVING_CUSTOM_EXT): | ||||
|               mime_choices = [(pair[0], pair[1], pair[0] in mime_to_extension_mappings(Constants.ZEEK_FILE_CARVING_OVERRIDE_FILE)) for pair in sorted(all_mime_maps.items(), key=lambda x: x[1].lower())] | ||||
|             else: | ||||
|               mime_choices = [(pair[0], pair[1], pair[0] in mime_to_extension_mappings(Constants.ZEEK_FILE_CARVING_OVERRIDE_FILE)) for pair in sorted(all_mime_maps.items(), key=lambda x: x[0].lower())] | ||||
|             code, mime_tags = d.checklist(Constants.MSG_CONFIG_ZEEK_CARVING_MIMES, choices=mime_choices) | ||||
|             if (code == Dialog.CANCEL or code == Dialog.ESC): | ||||
|               raise CancelledError | ||||
|  | ||||
|           mime_tags.sort() | ||||
|           if (len(mime_tags) == 0): | ||||
|             zeek_carve_mode = Constants.ZEEK_FILE_CARVING_NONE | ||||
|           elif (len(mime_tags) >= len(all_mime_maps)): | ||||
|             zeek_carve_mode = Constants.ZEEK_FILE_CARVING_MAPPED | ||||
|           elif len(mime_tags) > 0: | ||||
|             zeek_carve_mode = Constants.ZEEK_FILE_CARVING_MAPPED | ||||
|             capture_config_dict["ZEEK_EXTRACTOR_OVERRIDE_FILE"] = Constants.ZEEK_FILE_CARVING_OVERRIDE_FILE | ||||
|           else: | ||||
|             zeek_carve_mode = Constants.ZEEK_FILE_CARVING_MAPPED | ||||
|  | ||||
|         elif zeek_carve_mode.startswith(Constants.ZEEK_FILE_CARVING_INTERESTING): | ||||
|           shutil.copy(Constants.ZEEK_FILE_CARVING_OVERRIDE_INTERESTING_FILE, Constants.ZEEK_FILE_CARVING_OVERRIDE_FILE) | ||||
|           zeek_carve_mode = Constants.ZEEK_FILE_CARVING_MAPPED | ||||
|           capture_config_dict["ZEEK_EXTRACTOR_OVERRIDE_FILE"] = Constants.ZEEK_FILE_CARVING_OVERRIDE_FILE | ||||
|  | ||||
|  | ||||
|         # what to do with carved files | ||||
|         if (zeek_carve_mode != Constants.ZEEK_FILE_CARVING_NONE): | ||||
|  | ||||
|           # select engines for file scanning | ||||
|           scanner_choices = [] | ||||
|           for k, v in sorted(capture_config_dict.items()): | ||||
|             if k.startswith("ZEEK_FILE_SCAN_"): | ||||
|               scanner_choices.append((k, '', v.lower() == "true")) | ||||
|           code, scanner_tags = d.checklist(Constants.MSG_CONFIG_ZEEK_CARVED_SCANNERS, choices=scanner_choices) | ||||
|           if (code == Dialog.CANCEL or code == Dialog.ESC): | ||||
|             raise CancelledError | ||||
|  | ||||
|           for tag in [x[0] for x in scanner_choices]: | ||||
|             capture_config_dict[tag] = "false" | ||||
|           for tag in scanner_tags: | ||||
|             capture_config_dict[tag] = "true" | ||||
|           capture_config_dict["ZEEK_FILE_WATCH"] = "true" if (len(scanner_tags) > 0) else "false" | ||||
|  | ||||
|           # specify what to do with files that triggered the scanner engine(s) | ||||
|           code, zeek_carved_file_preservation = d.radiolist(Constants.MSG_CONFIG_CARVED_FILE_PRESERVATION, | ||||
|                                                            choices=[(PRESERVE_QUARANTINED, | ||||
|                                                                      'Preserve only quarantined files', | ||||
|                                                                      (capture_config_dict["EXTRACTED_FILE_PRESERVATION"] == PRESERVE_QUARANTINED)), | ||||
|                                                                     (PRESERVE_ALL, | ||||
|                                                                      'Preserve all files', | ||||
|                                                                      (capture_config_dict["EXTRACTED_FILE_PRESERVATION"] == PRESERVE_ALL)), | ||||
|                                                                     (PRESERVE_NONE, | ||||
|                                                                      'Preserve no files', | ||||
|                                                                      (capture_config_dict["EXTRACTED_FILE_PRESERVATION"] == PRESERVE_NONE))]) | ||||
|           if (code == Dialog.CANCEL or code == Dialog.ESC): | ||||
|             raise CancelledError | ||||
|  | ||||
|         else: | ||||
|           # file carving disabled, so disable file scanning as well | ||||
|           for key in ["ZEEK_FILE_WATCH", "ZEEK_FILE_SCAN_CLAMAV", "ZEEK_FILE_SCAN_VTOT", "ZEEK_FILE_SCAN_MALASS", "ZEEK_FILE_SCAN_YARA"]: | ||||
|             capture_config_dict[key] = "false" | ||||
|  | ||||
|         # reconstitute dictionary with user-specified values | ||||
|         capture_config_dict["CAPTURE_INTERFACE"] = ",".join(selected_ifaces) | ||||
|         capture_config_dict["CAPTURE_FILTER"] = capture_filter | ||||
|         capture_config_dict["PCAP_PATH"] = path_values[0] | ||||
|         capture_config_dict["ZEEK_LOG_PATH"] = path_values[1] | ||||
|         capture_config_dict["ZEEK_EXTRACTOR_MODE"] = zeek_carve_mode | ||||
|         capture_config_dict["EXTRACTED_FILE_PRESERVATION"] = zeek_carved_file_preservation | ||||
|  | ||||
|         # get confirmation from user that we really want to do this | ||||
|         code = d.yesno(Constants.MSG_CONFIG_CAP_CONFIRM.format("\n".join(sorted([f"{k}={v}" for k, v in capture_config_dict.items() if (not k.startswith("#")) and ("AUTOSTART" not in k) and ("PASSWORD" not in k)]))), | ||||
|                        yes_label="OK", no_label="Cancel") | ||||
|         if code == Dialog.OK: | ||||
|  | ||||
|           # modify specified values in-place in SENSOR_CAPTURE_CONFIG file | ||||
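|           # (with inplace=True, fileinput redirects stdout into the file being read, so each | ||||
|           # print() below writes the possibly-rewritten line back out; a .bak copy is kept) | ||||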
|           with fileinput.FileInput(Constants.SENSOR_CAPTURE_CONFIG, inplace=True, backup='.bak') as file: | ||||
|             for line in file: | ||||
|               line = line.rstrip("\n") | ||||
|               if capture_interface_re.search(line) is not None: | ||||
|                 print(capture_interface_re.sub(r"\1=%s" % ",".join(selected_ifaces), line)) | ||||
|               elif zeek_carve_override_re.search(line) is not None: | ||||
|                 print(zeek_carve_override_re.sub(r'\1="%s"' % capture_config_dict["ZEEK_EXTRACTOR_OVERRIDE_FILE"], line)) | ||||
|               elif zeek_carve_re.search(line) is not None: | ||||
|                 print(zeek_carve_re.sub(r"\1=%s" % zeek_carve_mode, line)) | ||||
|               elif zeek_file_preservation_re.search(line) is not None: | ||||
|                 print(zeek_file_preservation_re.sub(r"\1=%s" % zeek_carved_file_preservation, line)) | ||||
|               elif capture_filter_re.search(line) is not None: | ||||
|                 print(capture_filter_re.sub(r'\1="%s"' % capture_filter, line)) | ||||
|               elif pcap_path_re.search(line) is not None: | ||||
|                 print(pcap_path_re.sub(r'\1="%s"' % capture_config_dict["PCAP_PATH"], line)) | ||||
|               elif zeek_path_re.search(line) is not None: | ||||
|                 print(zeek_path_re.sub(r'\1="%s"' % capture_config_dict["ZEEK_LOG_PATH"], line)) | ||||
|               elif zeek_file_watch_re.search(line) is not None: | ||||
|                 print(zeek_file_watch_re.sub(r"\1=%s" % capture_config_dict["ZEEK_FILE_WATCH"], line)) | ||||
|               else: | ||||
|                 zeek_file_scanner_match = zeek_file_scanner_re.search(line) | ||||
|                 if zeek_file_scanner_match is not None: | ||||
|                   print(zeek_file_scanner_re.sub(r"\1=%s" % capture_config_dict[zeek_file_scanner_match.group(1)], line)) | ||||
|                 else: | ||||
|                   print(line) | ||||
|  | ||||
|           # write out file carving overrides if specified | ||||
|           if (len(mime_tags) > 0) and (len(capture_config_dict["ZEEK_EXTRACTOR_OVERRIDE_FILE"]) > 0): | ||||
|             with open(capture_config_dict["ZEEK_EXTRACTOR_OVERRIDE_FILE"], "w+") as f: | ||||
|               f.write('#!/usr/bin/env zeek\n') | ||||
|               f.write('\n') | ||||
|               f.write('export {\n') | ||||
|               f.write(f'  redef {Constants.ZEEK_FILE_CARVING_OVERRIDE_FILE_MAP_NAME} : table[string] of string = {{\n') | ||||
|               f.write(",\n".join([f'    ["{m}"] = "{all_mime_maps[m]}"' for m in mime_tags])) | ||||
|               f.write('\n  } &default="bin";\n') | ||||
|               f.write('}\n') | ||||
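|             # the override file written above is a small Zeek script along these lines | ||||
|             # (the mime type/extension pair shown is illustrative): | ||||
|             #   export { | ||||
|             #     redef <override map name> : table[string] of string = { | ||||
|             #       ["application/pdf"] = "pdf" | ||||
|             #     } &default="bin"; | ||||
|             #   } | ||||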
|  | ||||
|           # hooray | ||||
|           code = d.msgbox(text=Constants.MSG_CONFIG_CAPTURE_SUCCESS.format(",".join(selected_ifaces), Constants.SENSOR_CAPTURE_CONFIG)) | ||||
|  | ||||
|       elif mode == Constants.MSG_CONFIG_MODE_FORWARD: | ||||
|         ##### sensor forwarding (beats) configuration ######################################################################### | ||||
|  | ||||
|         code, fwd_mode = d.menu(Constants.MSG_CONFIG_MODE, choices=[Constants.MSG_CONFIG_FILEBEAT, Constants.MSG_CONFIG_ARKIME, Constants.MSG_CONFIG_METRICBEAT, Constants.MSG_CONFIG_AUDITBEAT, Constants.MSG_CONFIG_SYSLOGBEAT, Constants.MSG_CONFIG_HEATBEAT]) | ||||
|         if code != Dialog.OK: | ||||
|           raise CancelledError | ||||
|  | ||||
|         if (fwd_mode == Constants.ARKIMECAP): | ||||
|           # forwarding configuration for moloch-capture | ||||
|  | ||||
|           # get elasticsearch/kibana connection information from user | ||||
|           elastic_config_dict = input_elasticsearch_connection_info(forwarder=fwd_mode, | ||||
|                                                                     default_es_host=previous_config_values[Constants.BEAT_ES_HOST], | ||||
|                                                                     default_es_port=previous_config_values[Constants.BEAT_ES_PORT], | ||||
|                                                                     default_username=previous_config_values[Constants.BEAT_HTTP_USERNAME], | ||||
|                                                                     default_password=previous_config_values[Constants.BEAT_HTTP_PASSWORD]) | ||||
|           moloch_elastic_config_dict = elastic_config_dict.copy() | ||||
|           # massage the data a bit for how moloch's going to want it in the control_vars.conf file | ||||
|           if Constants.BEAT_HTTP_USERNAME in moloch_elastic_config_dict.keys(): | ||||
|             moloch_elastic_config_dict["ES_USERNAME"] = moloch_elastic_config_dict.pop(Constants.BEAT_HTTP_USERNAME) | ||||
|           if Constants.BEAT_HTTP_PASSWORD in moloch_elastic_config_dict.keys(): | ||||
|             moloch_elastic_config_dict["ES_PASSWORD"] = aggressive_url_encode(moloch_elastic_config_dict.pop(Constants.BEAT_HTTP_PASSWORD)) | ||||
|           moloch_elastic_config_dict = { k.replace('BEAT_', ''): v for k, v in moloch_elastic_config_dict.items() } | ||||
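|           # e.g., BEAT_ES_HOST becomes ES_HOST; the URL-encoding above is presumably so that | ||||
|           # special characters in the password survive being written into control_vars.conf | ||||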
|  | ||||
|           # get list of IP addresses allowed for packet payload retrieval | ||||
|           lines = previous_config_values[Constants.ARKIME_PACKET_ACL].split(",") | ||||
|           lines.append(elastic_config_dict[Constants.BEAT_ES_HOST]) | ||||
|           code, lines = d.editbox_str("\n".join(list(filter(None, list(set(lines))))), title=Constants.MSG_CONFIG_ARKIME_PCAP_ACL) | ||||
|           if code != Dialog.OK: | ||||
|             raise CancelledError | ||||
|           moloch_elastic_config_dict[Constants.ARKIME_PACKET_ACL] = ','.join([ip for ip in list(set(filter(None, [x.strip() for x in lines.split('\n')]))) if isipaddress(ip)]) | ||||
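|           # (the editbox text is split on newlines, stripped, deduplicated, and reduced to | ||||
|           # entries that pass isipaddress, a validation helper defined elsewhere) | ||||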
|  | ||||
|           list_results = sorted([f"{k}={v}" for k, v in moloch_elastic_config_dict.items() if ("PASSWORD" not in k) and (not k.startswith("#"))]) | ||||
|  | ||||
|           code = d.yesno(Constants.MSG_CONFIG_FORWARDING_CONFIRM.format(fwd_mode, "\n".join(list_results)), | ||||
|                          yes_label="OK", no_label="Cancel") | ||||
|           if code != Dialog.OK: | ||||
|             raise CancelledError | ||||
|  | ||||
|           previous_config_values = elastic_config_dict.copy() | ||||
|  | ||||
|           # modify specified values in-place in SENSOR_CAPTURE_CONFIG file | ||||
|           elastic_values_re = re.compile(r"\b(" + '|'.join(list(moloch_elastic_config_dict.keys())) + r")\s*=\s*.*?$") | ||||
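|           # e.g., with keys ES_HOST and ES_PORT this pattern matches lines like "ES_HOST=10.0.0.1", | ||||
|           # capturing the key name so the corresponding new value can be substituted below | ||||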
|           with fileinput.FileInput(Constants.SENSOR_CAPTURE_CONFIG, inplace=True, backup='.bak') as file: | ||||
|             for line in file: | ||||
|               line = line.rstrip("\n") | ||||
|               elastic_key_match = elastic_values_re.search(line) | ||||
|               if elastic_key_match is not None: | ||||
|                 print(elastic_values_re.sub(r"\1=%s" % moloch_elastic_config_dict[elastic_key_match.group(1)], line)) | ||||
|               else: | ||||
|                 print(line) | ||||
|  | ||||
|           # hooray | ||||
|           code = d.msgbox(text=Constants.MSG_CONFIG_FORWARDING_SUCCESS.format(fwd_mode, "\n".join(list_results))) | ||||
|  | ||||
|         elif fwd_mode in (Constants.FILEBEAT, Constants.METRICBEAT, Constants.AUDITBEAT, Constants.SYSLOGBEAT, Constants.HEATBEAT): | ||||
|           # forwarder configuration for beats | ||||
|  | ||||
|           if not os.path.isdir(Constants.BEAT_DIR[fwd_mode]): | ||||
|             # beat dir not found, give up | ||||
|             raise Exception(Constants.MSG_ERROR_FWD_DIR_NOT_FOUND.format(Constants.BEAT_DIR[fwd_mode], fwd_mode)) | ||||
|  | ||||
|           # chdir to the beat directory | ||||
|           os.chdir(Constants.BEAT_DIR[fwd_mode]) | ||||
|  | ||||
|           # check to see if a keystore has already been created for the forwarder | ||||
|           ecode, list_results = run_process(f"{Constants.BEAT_CMD[fwd_mode]} keystore list") | ||||
|           if (ecode == 0) and (len(list_results) > 0): | ||||
|             # it has, do they wish to overwrite it? | ||||
|             if (d.yesno(Constants.MSG_OVERWRITE_CONFIG.format(fwd_mode)) != Dialog.OK): | ||||
|               raise CancelledError | ||||
|  | ||||
|           ecode, create_results = run_process(f"{Constants.BEAT_CMD[fwd_mode]} keystore create --force", stderr=True) | ||||
|           if (ecode != 0): | ||||
|             # keystore creation failed | ||||
|             raise Exception(Constants.MSG_ERROR_KEYSTORE.format(fwd_mode, "\n".join(create_results))) | ||||
|  | ||||
|           forwarder_dict = defaultdict(str) | ||||
|  | ||||
|           if fwd_mode in (Constants.METRICBEAT, Constants.AUDITBEAT, Constants.SYSLOGBEAT, Constants.HEATBEAT): | ||||
|             #### auditbeat/metricbeat/filebeat-syslog ################################################################### | ||||
|             # enter beat configuration (in a few steps) | ||||
|  | ||||
|             if (fwd_mode == Constants.METRICBEAT): | ||||
|               # interval is metricbeat only, the rest is used by both | ||||
|               code, beat_interval = d.rangebox(f"{Constants.MSG_CONFIG_GENERIC.format(fwd_mode)} interval (seconds)", | ||||
|                                               width=60, min=1, max=60, init=30) | ||||
|               if (code == Dialog.CANCEL or code == Dialog.ESC): | ||||
|                 raise CancelledError | ||||
|               forwarder_dict[Constants.BEAT_INTERVAL] = f"{beat_interval}s" | ||||
|  | ||||
|             # get elasticsearch/kibana connection information from user | ||||
|             forwarder_dict.update(input_elasticsearch_connection_info(forwarder=fwd_mode, | ||||
|                                                                       default_es_host=previous_config_values[Constants.BEAT_ES_HOST], | ||||
|                                                                       default_es_port=previous_config_values[Constants.BEAT_ES_PORT], | ||||
|                                                                       default_kibana_host=previous_config_values[Constants.BEAT_KIBANA_HOST], | ||||
|                                                                       default_kibana_port=previous_config_values[Constants.BEAT_KIBANA_PORT], | ||||
|                                                                       default_username=previous_config_values[Constants.BEAT_HTTP_USERNAME], | ||||
|                                                                       default_password=previous_config_values[Constants.BEAT_HTTP_PASSWORD])) | ||||
|  | ||||
|  | ||||
|           elif (fwd_mode == Constants.FILEBEAT): | ||||
|             #### filebeat ############################################################################################# | ||||
|             while True: | ||||
|               forwarder_dict = defaultdict(str) | ||||
|  | ||||
|               # enter main filebeat configuration | ||||
|               code, values = d.form(Constants.MSG_CONFIG_GENERIC.format(fwd_mode), [ | ||||
|                                     ('Log Path', 1, 1, capture_config_dict["ZEEK_LOG_PATH"],  1, 20, 30, 255), | ||||
|                                     ('Destination Host', 2, 1, "", 2, 20, 30, 255), | ||||
|                                     ('Destination Port', 3, 1, "5044", 3, 20, 6, 5) | ||||
|                                     ]) | ||||
|               values = [x.strip() for x in values] | ||||
|  | ||||
|               if (code == Dialog.CANCEL) or (code == Dialog.ESC): | ||||
|                 raise CancelledError | ||||
|  | ||||
|               elif (len(values[0]) <= 0) or (not os.path.isdir(values[0])): | ||||
|                 code = d.msgbox(text=Constants.MSG_ERROR_DIR_NOT_FOUND) | ||||
|  | ||||
|               elif (len(values[1]) <= 0) or (len(values[2]) <= 0) or (not values[2].isnumeric()): | ||||
|                 code = d.msgbox(text=Constants.MSG_ERROR_BAD_HOST) | ||||
|  | ||||
|               else: | ||||
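|                 # (BEAT_LOG_PATH_SUBDIR and BEAT_LOG_PATTERN_VAL are constants defined earlier, | ||||
|                 # not shown here; together they presumably glob Zeek's live log files under the | ||||
|                 # path entered above) | ||||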
|                 forwarder_dict[Constants.BEAT_LOG_PATTERN_KEY] = os.path.join(os.path.join(values[0], Constants.BEAT_LOG_PATH_SUBDIR), Constants.BEAT_LOG_PATTERN_VAL) | ||||
|                 forwarder_dict[Constants.BEAT_LS_HOST] = values[1] | ||||
|                 forwarder_dict[Constants.BEAT_LS_PORT] = values[2] | ||||
|                 break | ||||
|  | ||||
|             # optionally, filebeat can use SSL if Logstash is configured for it | ||||
|             logstash_ssl = "false" | ||||
|             logstash_ssl_verify = "none" | ||||
|             if (d.yesno("Forward Zeek logs over SSL? (Note: This requires the destination to be similarly configured and a corresponding copy of the client SSL files.)", yes_label="SSL", no_label="Unencrypted") == Dialog.OK): | ||||
|               logstash_ssl = "true" | ||||
|               if (d.yesno("Logstash SSL verification", yes_label="None", no_label="Force Peer") != Dialog.OK): | ||||
|                 logstash_ssl_verify = "force_peer" | ||||
|             forwarder_dict[Constants.BEAT_LS_SSL] = logstash_ssl | ||||
|             forwarder_dict[Constants.BEAT_LS_SSL_VERIFY] = logstash_ssl_verify | ||||
|  | ||||
|             if (forwarder_dict[Constants.BEAT_LS_SSL] == "true"): | ||||
|               while True: | ||||
|                 code, values = d.form(Constants.MSG_CONFIG_GENERIC.format(fwd_mode), [ | ||||
|                                       ('SSL Certificate Authorities File', 1, 1, "", 1, 35, 30, 255), | ||||
|                                       ('SSL Certificate File', 2, 1, "", 2, 35, 30, 255), | ||||
|                                       ('SSL Key File', 3, 1, "", 3, 35, 30, 255), | ||||
|                                       ]) | ||||
|                 values = [x.strip() for x in values] | ||||
|  | ||||
|                 if (code == Dialog.CANCEL) or (code == Dialog.ESC): | ||||
|                   raise CancelledError | ||||
|  | ||||
|                 elif ((len(values[0]) <= 0) or (not os.path.isfile(values[0])) or | ||||
|                       (len(values[1]) <= 0) or (not os.path.isfile(values[1])) or | ||||
|                       (len(values[2]) <= 0) or (not os.path.isfile(values[2]))): | ||||
|                   code = d.msgbox(text=Constants.MSG_ERROR_FILE_NOT_FOUND) | ||||
|  | ||||
|                 else: | ||||
|                   forwarder_dict[Constants.BEAT_LS_SSL_CA_CRT] = values[0] | ||||
|                   forwarder_dict[Constants.BEAT_LS_SSL_CLIENT_CRT] = values[1] | ||||
|                   forwarder_dict[Constants.BEAT_LS_SSL_CLIENT_KEY] = values[2] | ||||
|                   break | ||||
|  | ||||
|             else: | ||||
|               forwarder_dict[Constants.BEAT_LS_SSL_CA_CRT] = "" | ||||
|               forwarder_dict[Constants.BEAT_LS_SSL_CLIENT_CRT] = "" | ||||
|               forwarder_dict[Constants.BEAT_LS_SSL_CLIENT_KEY] = "" | ||||
|  | ||||
|             # see if logstash port is open (not a great connection test, but better than nothing!) | ||||
|             code = d.infobox(Constants.MSG_TESTING_CONNECTION.format("Logstash")) | ||||
|             if not check_socket(forwarder_dict[Constants.BEAT_LS_HOST], int(forwarder_dict[Constants.BEAT_LS_PORT])): | ||||
|               code = d.yesno(text=Constants.MSG_TESTING_CONNECTION_FAILURE_LOGSTASH.format("Logstash", forwarder_dict[Constants.BEAT_LS_HOST], forwarder_dict[Constants.BEAT_LS_PORT]), | ||||
|                              yes_label="Ignore Error", no_label="Start Over") | ||||
|               if code != Dialog.OK: | ||||
|                 raise CancelledError | ||||
|  | ||||
|           # outside of filebeat/metricbeat if/else, get confirmation and write out the values to the keystore | ||||
|           if forwarder_dict: | ||||
|  | ||||
|             # get confirmation of parameters before we pull the trigger | ||||
|             code = d.yesno(Constants.MSG_CONFIG_FORWARDING_CONFIRM.format(fwd_mode, "\n".join(sorted([f"{k}={v}" for k, v in forwarder_dict.items() if "PASSWORD" not in k]))), | ||||
|                            yes_label="OK", no_label="Cancel") | ||||
|             if code != Dialog.OK: | ||||
|               raise CancelledError | ||||
|  | ||||
|             previous_config_values = forwarder_dict.copy() | ||||
|  | ||||
|             # it's go time, call keystore add for each item | ||||
|             for k, v in sorted(forwarder_dict.items()): | ||||
|               ecode, add_results = run_process(f"{Constants.BEAT_CMD[fwd_mode]} keystore add {k} --stdin --force", stdin=v, stderr=True) | ||||
|               if (ecode != 0): | ||||
|                 # keystore creation failed | ||||
|                 raise Exception(Constants.MSG_ERROR_KEYSTORE.format(fwd_mode, "\n".join(add_results))) | ||||
|  | ||||
|             # get a final list of parameters that were set to show the user that stuff happened | ||||
|             ecode, list_results = run_process(f"{Constants.BEAT_CMD[fwd_mode]} keystore list") | ||||
|             if (ecode == 0): | ||||
|               code = d.msgbox(text=Constants.MSG_CONFIG_FORWARDING_SUCCESS.format(fwd_mode, "\n".join(list_results))) | ||||
|  | ||||
|             else: | ||||
|               # keystore list failed | ||||
|               raise Exception(Constants.MSG_ERROR_KEYSTORE.format(fwd_mode, "\n".join(list_results))) | ||||
|  | ||||
|           else: | ||||
|             # we got through the config but ended up with no values for configuration! | ||||
|             raise Exception(Constants.MSG_MESSAGE_ERROR.format(Constants.MSG_EMPTY_CONFIG_ERROR)) | ||||
|  | ||||
|     except CancelledError as c: | ||||
|       # d.msgbox(text=Constants.MSG_CANCEL_ERROR) | ||||
|       # just start over | ||||
|       continue | ||||
|  | ||||
|     except Exception as e: | ||||
|       d.msgbox(text=Constants.MSG_MESSAGE_ERROR.format(e)) | ||||
|       raise | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|   main() | ||||
|   clearquit() | ||||
							
								
								
									
478  Vagrant/resources/malcolm/shared/bin/configure-interfaces.py  Executable file
							| @@ -0,0 +1,478 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| # script for configuring sensor network interface controller(s) | ||||
|  | ||||
| import locale | ||||
| import os | ||||
| import sys | ||||
| import netifaces | ||||
| import fileinput | ||||
| import re | ||||
| from dialog import Dialog | ||||
| from debinterface.interfaces import Interfaces | ||||
| from sensorcommon import * | ||||
|  | ||||
| class Constants: | ||||
|   DHCP = 'dhcp' | ||||
|   STATIC = 'static' | ||||
|   UNASSIGNED = 'manual' | ||||
|  | ||||
|   DEV_IDENTIFIER_FILE = '/etc/installer' | ||||
|   DEV_UNKNOWN = 'unknown' | ||||
|   DEV_AGGREGATOR = 'aggregator' | ||||
|   DEV_SENSOR = 'sensor' | ||||
|   DEV_VALID = {DEV_AGGREGATOR, DEV_SENSOR} | ||||
|   MSG_ERR_DEV_INVALID = f'Could not determine installation type (not one of {DEV_VALID})' | ||||
|  | ||||
|   CONFIG_IFACE = 'Interface Configuration' | ||||
|  | ||||
|   SENSOR_BACKUP_CONFIG = '/tmp/sensor_interface.bak' | ||||
|   SENSOR_INTERFACES_CONFIG = '/etc/network/interfaces.d/sensor' | ||||
|   ETC_HOSTS = '/etc/hosts' | ||||
|  | ||||
|   TIME_SYNC_NTP = 'ntp' | ||||
|   TIME_SYNC_HTPDATE = 'htpdate' | ||||
|   TIME_SYNC_HTPDATE_CRON = '/etc/cron.d/htpdate' | ||||
|   TIME_SYNC_HTPDATE_TEST_COMMAND = '/usr/sbin/htpdate -4 -a -b -d' | ||||
|   TIME_SYNC_HTPDATE_COMMAND = '/usr/sbin/htpdate -4 -a -b -l -s' | ||||
|   TIME_SYNC_NTP_CONFIG = '/etc/ntp.conf' | ||||
|  | ||||
|   MSG_CONFIG_MODE = 'Configuration Mode' | ||||
|   MSG_BACKGROUND_TITLE = 'Sensor Configuration' | ||||
|   MSG_CONFIG_HOST = ('Hostname', 'Configure sensor hostname') | ||||
|   MSG_CONFIG_INTERFACE = ('Interface', 'Configure an interface\'s IP address') | ||||
|   MSG_CONFIG_TIME_SYNC = ('Time Sync', 'Configure time synchronization') | ||||
|   MSG_CONFIG_STATIC_TITLE = 'Provide the values for static IP configuration' | ||||
|   MSG_ERR_ROOT_REQUIRED = 'Elevated privileges required, run as root' | ||||
|   MSG_ERR_BAD_HOST = 'Invalid host or port' | ||||
|   MSG_MESSAGE_DHCP = 'Configuring for DHCP provided address...' | ||||
|   MSG_MESSAGE_ERROR = 'Error: {}\n\nPlease try again.' | ||||
|   MSG_MESSAGE_STATIC = 'Configuring for static IP address...' | ||||
|   MSG_MESSAGE_UNASSIGNED = 'Configuring for no IP address...' | ||||
|   MSG_NETWORK_START_ERROR = 'Error occurred while configuring network interface!\n\n' | ||||
|   MSG_NETWORK_START_SUCCESS = 'Network interface configuration completed successfully!\n\n' | ||||
|   MSG_NETWORK_STOP_ERROR = 'Error occurred while bringing down the network interface!\n\n' | ||||
|   MSG_NETWORK_STOP_SUCCESS = 'Brought down the network interface successfully!\n\n' | ||||
|   MSG_TIME_SYNC_TYPE = 'Select time synchronization method' | ||||
|   MSG_TIME_SYNC_HTPDATE_CONFIG = 'Provide values for HTTP/HTTPS Server' | ||||
|   MSG_TIME_SYNC_TEST_SUCCESS = 'Server time retrieved successfully!\n\n' | ||||
|   MSG_TIME_SYNC_CONFIG_SUCCESS = 'Time synchronization configured successfully!\n\n' | ||||
|   MSG_TIME_SYNC_TEST_FAILURE = 'Server time could not be retrieved. Ignore error?\n\n' | ||||
|   MSG_TIME_SYNC_NTP_CONFIG = 'Provide values for NTP Server' | ||||
|   MSG_TESTING_CONNECTION = 'Testing {} connection...' | ||||
|   MSG_TESTING_CONNECTION_FAILURE = "Connection error: could not connect to {}:{}" | ||||
|   MSG_SET_HOSTNAME_CURRENT = 'Current sensor identification information\n\n' | ||||
|   MSG_SET_HOSTNAME_SUCCESS = 'Set sensor hostname successfully!\n\n' | ||||
|   MSG_IDENTIFY_NICS = 'Do you need help identifying network interfaces?' | ||||
|   MSG_SELECT_INTERFACE = 'Select interface to configure' | ||||
|   MSG_SELECT_BLINK_INTERFACE = 'Select capture interface to identify' | ||||
|   MSG_BLINK_INTERFACE = '{} will blink for {} seconds' | ||||
|   MSG_SELECT_SOURCE = 'Select address source' | ||||
|   MSG_WELCOME_TITLE = 'Welcome to the sensor network interface controller utility!' | ||||
|  | ||||
| # the main dialog window used for the duration of this tool | ||||
| d = Dialog(dialog='dialog', autowidgetsize=True) | ||||
| d.set_background_title(Constants.MSG_BACKGROUND_TITLE) | ||||
|  | ||||
| ################################################################################################### | ||||
| # if the given interface is up, "ifdown" it | ||||
| def network_stop(selected_iface): | ||||
|   iface_state = "unknown" | ||||
|   with open(f"/sys/class/net/{selected_iface}/operstate", 'r') as f: | ||||
|     iface_state = f.readline().strip() | ||||
|  | ||||
|   if (iface_state == "up"): | ||||
|     command = f"ifdown {selected_iface}" | ||||
|   else: | ||||
|     command = f"cat /sys/class/net/{selected_iface}/operstate" | ||||
|  | ||||
|   return run_process(command, stderr=True) | ||||
|  | ||||
| ################################################################################################### | ||||
| # if the given interface is not up, "ifup" it | ||||
| def network_start(selected_iface): | ||||
|   iface_state = "unknown" | ||||
|   with open(f"/sys/class/net/{selected_iface}/operstate", 'r') as f: | ||||
|     iface_state = f.readline().strip() | ||||
|  | ||||
|   if (iface_state != "up"): | ||||
|     command = f"ifup {selected_iface}" | ||||
|   else: | ||||
|     command = f"cat /sys/class/net/{selected_iface}/operstate" | ||||
|  | ||||
|   return run_process(command, stderr=True) | ||||
|  | ||||
| ################################################################################################### | ||||
| # for a given interface, bring it down, write its new settings, and bring it back up | ||||
| def write_and_display_results(interfaces, selected_iface): | ||||
|  | ||||
|   ecode, stop_results = network_stop(selected_iface) | ||||
|   stop_results = list(filter(lambda x: (len(x) > 0) and ('Internet Systems' not in x) and ('Copyright' not in x) and ('All rights' not in x) and ('For info' not in x), stop_results)) | ||||
|   if ecode == 0: | ||||
|     stop_text = Constants.MSG_NETWORK_STOP_SUCCESS | ||||
|   else: | ||||
|     stop_text = Constants.MSG_NETWORK_STOP_ERROR | ||||
|  | ||||
|   interfaces.writeInterfaces() | ||||
|  | ||||
|   ecode, start_results = network_start(selected_iface) | ||||
|   start_results = list(filter(lambda x: (len(x.strip()) > 0) and ('Internet Systems' not in x) and ('Copyright' not in x) and ('All rights' not in x) and ('For info' not in x), start_results)) | ||||
|   if ecode == 0: | ||||
|     start_text = Constants.MSG_NETWORK_START_SUCCESS | ||||
|   else: | ||||
|     start_text = Constants.MSG_NETWORK_START_ERROR | ||||
|  | ||||
|   code = d.msgbox(stop_text + "\n".join(stop_results) + "\n\n. . .\n\n" + start_text + "\n".join(start_results)) | ||||
|  | ||||
| ################################################################################################### | ||||
| ################################################################################################### | ||||
| def main(): | ||||
|   locale.setlocale(locale.LC_ALL, '') | ||||
|  | ||||
|   # make sure we are being run as root | ||||
|   if os.getuid() != 0: | ||||
|     print(Constants.MSG_ERR_ROOT_REQUIRED) | ||||
|     sys.exit(1) | ||||
|  | ||||
|   # what are we (sensor vs. aggregator) | ||||
|   installation = Constants.DEV_UNKNOWN | ||||
|   modeChoices = [] | ||||
|   try: | ||||
|     with open(Constants.DEV_IDENTIFIER_FILE, 'r') as f: | ||||
|       installation = f.readline().strip() | ||||
|   except: | ||||
|     pass | ||||
|   if (installation == Constants.DEV_SENSOR): | ||||
|     modeChoices = [Constants.MSG_CONFIG_INTERFACE, Constants.MSG_CONFIG_HOST, Constants.MSG_CONFIG_TIME_SYNC] | ||||
|   elif (installation == Constants.DEV_AGGREGATOR): | ||||
|     modeChoices = [Constants.MSG_CONFIG_HOST, Constants.MSG_CONFIG_TIME_SYNC] | ||||
|   else: | ||||
|     print(Constants.MSG_ERR_DEV_INVALID) | ||||
|     sys.exit(1) | ||||
|  | ||||
|   start_dir = os.getcwd() | ||||
|   quit_flag = False | ||||
|  | ||||
|   while not quit_flag: | ||||
|     os.chdir(start_dir) | ||||
|     try: | ||||
|  | ||||
|       # welcome | ||||
|       code = d.yesno(Constants.MSG_WELCOME_TITLE, yes_label="Continue", no_label="Quit") | ||||
|       if (code == Dialog.CANCEL or code == Dialog.ESC): | ||||
|         quit_flag = True | ||||
|         raise CancelledError | ||||
|  | ||||
|       # configuring an interface or setting the hostname? | ||||
|       code, config_mode = d.menu(Constants.MSG_CONFIG_MODE, choices=modeChoices) | ||||
|       if code != Dialog.OK: | ||||
|         quit_flag = True | ||||
|         raise CancelledError | ||||
|  | ||||
|       if (config_mode == Constants.MSG_CONFIG_HOST[0]): | ||||
|         ##### system hostname configuration ################################################################################################## | ||||
|  | ||||
|         # get current host/identification information | ||||
|         ecode, host_get_output = run_process('hostnamectl', stderr=True) | ||||
|         if (ecode == 0): | ||||
|           emsg_str = '\n'.join(host_get_output) | ||||
|           code = d.msgbox(text=f"{Constants.MSG_SET_HOSTNAME_CURRENT}{emsg_str}") | ||||
|  | ||||
|           ecode, hostname_get_output = run_process('hostname', stderr=False) | ||||
|           if (ecode == 0) and (len(hostname_get_output) > 0): | ||||
|             old_hostname = hostname_get_output[0].strip() | ||||
|           else: | ||||
|             old_hostname = "" | ||||
|  | ||||
|           # user input for new hostname | ||||
|           while True: | ||||
|             code, new_hostname = d.inputbox("Sensor hostname", init=old_hostname) | ||||
|             if (code == Dialog.CANCEL) or (code == Dialog.ESC): | ||||
|               raise CancelledError | ||||
|             elif (len(new_hostname) <= 0): | ||||
|               code = d.msgbox(text=Constants.MSG_MESSAGE_ERROR.format('Invalid hostname specified')) | ||||
|             else: | ||||
|               break | ||||
|  | ||||
|           # set new hostname | ||||
|           ecode, host_set_output = run_process(f'hostnamectl set-hostname {new_hostname.strip()}', stderr=True) | ||||
|           if (ecode == 0): | ||||
|             ecode, host_get_output = run_process('hostnamectl', stderr=True) | ||||
|             emsg_str = '\n'.join(host_get_output) | ||||
|             code = d.msgbox(text=f"{Constants.MSG_SET_HOSTNAME_SUCCESS}{emsg_str}") | ||||
|  | ||||
|             # modify /etc/hosts 127.0.1.1 entry | ||||
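|             # (Debian-family systems conventionally map the local hostname to 127.0.1.1, | ||||
|             # separate from the 127.0.0.1 localhost entry) | ||||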
|             local_hosts_re = re.compile(r"^\s*127\.0\.1\.1\b") | ||||
|             with fileinput.FileInput(Constants.ETC_HOSTS, inplace=True, backup='.bak') as file: | ||||
|               for line in file: | ||||
|                 if local_hosts_re.search(line) is not None: | ||||
|                   print(f"127.0.1.1\t{new_hostname}") | ||||
|                 else: | ||||
|                   print(line, end='') | ||||
|  | ||||
|           else: | ||||
|             # error running hostnamectl set-hostname | ||||
|             emsg_str = '\n'.join(host_set_output) | ||||
|             code = d.msgbox(text=Constants.MSG_MESSAGE_ERROR.format(f"Setting hostname failed with {ecode}:{emsg_str}")) | ||||
|  | ||||
|         else: | ||||
|           # error running hostnamectl | ||||
|           emsg_str = '\n'.join(host_get_output) | ||||
|           code = d.msgbox(text=Constants.MSG_MESSAGE_ERROR.format(f"Getting hostname failed with {ecode}:{emsg_str}")) | ||||
|  | ||||
|       elif (config_mode == Constants.MSG_CONFIG_TIME_SYNC[0]): | ||||
|         ##### time synchronization configuration ############################################################################################# | ||||
|         time_sync_mode = '' | ||||
|         code = Dialog.OK | ||||
|         while (len(time_sync_mode) == 0) and (code == Dialog.OK): | ||||
|           code, time_sync_mode = d.radiolist(Constants.MSG_TIME_SYNC_TYPE, choices=[(Constants.TIME_SYNC_HTPDATE, 'Use a Malcolm server (or another HTTP/HTTPS server)', (installation == Constants.DEV_SENSOR)), | ||||
|                                                                                     (Constants.TIME_SYNC_NTP, 'Use an NTP server', False)]) | ||||
|         if (code != Dialog.OK): | ||||
|           raise CancelledError | ||||
|  | ||||
|         elif (time_sync_mode == Constants.TIME_SYNC_HTPDATE): | ||||
|           # sync time via htpdate, run via cron | ||||
|  | ||||
|           http_host = '' | ||||
|           http_port = '' | ||||
|           while True: | ||||
|             # host/port for htpdate | ||||
|             code, values = d.form(Constants.MSG_TIME_SYNC_HTPDATE_CONFIG, | ||||
|                                   [('Host', 1, 1, '', 1,  25, 30, 255), | ||||
|                                    ('Port', 2, 1, '9200', 2, 25, 6, 5)]) | ||||
|             values = [x.strip() for x in values] | ||||
|  | ||||
|             if (code == Dialog.CANCEL) or (code == Dialog.ESC): | ||||
|               raise CancelledError | ||||
|  | ||||
|             elif (len(values[0]) <= 0) or (len(values[1]) <= 0) or (not values[1].isnumeric()): | ||||
|               code = d.msgbox(text=Constants.MSG_ERR_BAD_HOST) | ||||
|  | ||||
|             else: | ||||
|               http_host = values[0] | ||||
|               http_port = values[1] | ||||
|               break | ||||
|  | ||||
|           # test with htpdate to see if we can connect | ||||
|           ecode, test_output = run_process(f"{Constants.TIME_SYNC_HTPDATE_TEST_COMMAND} {http_host}:{http_port}") | ||||
|           if ecode == 0: | ||||
|             emsg_str = '\n'.join(test_output) | ||||
|             code = d.msgbox(text=f"{Constants.MSG_TIME_SYNC_TEST_SUCCESS}{emsg_str}") | ||||
|           else: | ||||
|             emsg_str = '\n'.join(test_output) | ||||
|             code = d.yesno(text=f"{Constants.MSG_TIME_SYNC_TEST_FAILURE}{emsg_str}", | ||||
|                            yes_label="Ignore Error", no_label="Start Over") | ||||
|             if code != Dialog.OK: | ||||
|               raise CancelledError | ||||
|  | ||||
|           # get polling interval | ||||
|           code, htpdate_interval = d.rangebox("Time synchronization polling interval (minutes)", | ||||
|                                               width=60, min=1, max=60, init=15) | ||||
|           if (code == Dialog.CANCEL or code == Dialog.ESC): | ||||
|             raise CancelledError | ||||
|  | ||||
|           # stop and disable the ntp process | ||||
|           run_process('/bin/systemctl stop ntp') | ||||
|           run_process('/bin/systemctl disable ntp') | ||||
|  | ||||
|           # write out htpdate file for cron | ||||
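|           # with the defaults above this renders a line like: | ||||
|           #   */15 * * * * root /usr/sbin/htpdate -4 -a -b -l -s <host>:<port> | ||||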
|           with open(Constants.TIME_SYNC_HTPDATE_CRON, 'w+') as f: | ||||
|             f.write('SHELL=/bin/bash\n') | ||||
|             f.write('PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin\n') | ||||
|             f.write('\n') | ||||
|             f.write(f'*/{htpdate_interval} * * * * root {Constants.TIME_SYNC_HTPDATE_COMMAND} {http_host}:{http_port}\n') | ||||
|             f.write('\n') | ||||
|           code = d.msgbox(text=f"{Constants.MSG_TIME_SYNC_CONFIG_SUCCESS}") | ||||
|  | ||||
|         elif (time_sync_mode == Constants.TIME_SYNC_NTP): | ||||
|           # sync time via ntp, run via service | ||||
|  | ||||
|           ntp_host = '' | ||||
|           while True: | ||||
|             # host/port for ntp | ||||
|             code, values = d.form(Constants.MSG_TIME_SYNC_NTP_CONFIG, | ||||
|                                   [('Host', 1, 1, '', 1,  25, 30, 255)]) | ||||
|             values = [x.strip() for x in values] | ||||
|  | ||||
|             if (code == Dialog.CANCEL) or (code == Dialog.ESC): | ||||
|               raise CancelledError | ||||
|  | ||||
|             elif (len(values[0]) <= 0): | ||||
|               code = d.msgbox(text=Constants.MSG_ERR_BAD_HOST) | ||||
|  | ||||
|             else: | ||||
|               ntp_host = values[0] | ||||
|               break | ||||
|  | ||||
|           # disable htpdate (no need to have two sync-ers) by removing it from cron | ||||
|           if os.path.exists(Constants.TIME_SYNC_HTPDATE_CRON): | ||||
|             os.remove(Constants.TIME_SYNC_HTPDATE_CRON) | ||||
|  | ||||
|           # write out ntp config file (changing values in place) | ||||
|           server_written = False | ||||
|           server_re = re.compile(r"^\s*#?\s*(server)\s*.+?$") | ||||
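|           # e.g., the first matching "server ..." (or "#server ...") line is replaced with | ||||
|           # "server <ntp_host>"; any later server lines are commented out so only one is active | ||||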
|           with fileinput.FileInput(Constants.TIME_SYNC_NTP_CONFIG, inplace=True, backup='.bak') as file: | ||||
|             for line in file: | ||||
|               line = line.rstrip("\n") | ||||
|               server_match = server_re.search(line) | ||||
|               if server_match is not None: | ||||
|                 if not server_written: | ||||
|                   print(f'server {ntp_host}') | ||||
|                   server_written = True | ||||
|                 else: | ||||
|                   print(f"{'' if line.startswith('#') else '#'}{line}") | ||||
|               else: | ||||
|                 print(line) | ||||
|  | ||||
|           # enable and start the ntp process | ||||
|           run_process('/bin/systemctl stop ntp') | ||||
|           run_process('/bin/systemctl enable ntp') | ||||
|           ecode, start_output = run_process('/bin/systemctl start ntp', stderr=True) | ||||
|           if ecode == 0: | ||||
|             code = d.msgbox(text=f"{Constants.MSG_TIME_SYNC_CONFIG_SUCCESS}") | ||||
|           else: | ||||
|             code = d.msgbox(text=Constants.MSG_MESSAGE_ERROR.format('\n'.join(start_output))) | ||||
|  | ||||
|         else: | ||||
|           raise CancelledError | ||||
|  | ||||
|       else: | ||||
|         ##### interface IP address configuration ############################################################################################# | ||||
|  | ||||
|         # read configuration from /etc/network/interfaces.d/sensor (or /etc/network/interfaces if for some reason it doesn't exist) | ||||
|         if os.path.isfile(Constants.SENSOR_INTERFACES_CONFIG): | ||||
|           interfaces = Interfaces(interfaces_path=Constants.SENSOR_INTERFACES_CONFIG, backup_path=Constants.SENSOR_BACKUP_CONFIG) | ||||
|         else: | ||||
|           interfaces = Interfaces(backup_path=Constants.SENSOR_BACKUP_CONFIG) | ||||
|  | ||||
|         # determine a list of available (non-virtual) adapters | ||||
|         available_adapters = get_available_adapters() | ||||
|  | ||||
|         while (len(available_adapters) > 0) and (d.yesno(Constants.MSG_IDENTIFY_NICS) == Dialog.OK): | ||||
|           code, blinky_iface = d.radiolist(Constants.MSG_SELECT_BLINK_INTERFACE, choices=[(adapter.name, adapter.description, False) for adapter in available_adapters]) | ||||
|           if (code == Dialog.OK) and (len(blinky_iface) > 0): | ||||
|             if (d.yesno(Constants.MSG_BLINK_INTERFACE.format(blinky_iface, NIC_BLINK_SECONDS), yes_label="Ready", no_label="Cancel") == Dialog.OK): | ||||
|               identify_adapter(adapter=blinky_iface, duration=NIC_BLINK_SECONDS, background=True) | ||||
|               code = d.pause(f"Identifying {blinky_iface}", seconds=NIC_BLINK_SECONDS, width=60, height=15) | ||||
|           elif (code != Dialog.OK): | ||||
|             break | ||||
|  | ||||
|         code, tag = d.menu(Constants.MSG_SELECT_INTERFACE, choices=[(adapter.name, adapter.description) for adapter in available_adapters]) | ||||
|         if code != Dialog.OK: | ||||
|           raise CancelledError | ||||
|  | ||||
|         # which interface are we configuring? | ||||
|         selected_iface = tag | ||||
|  | ||||
|         # check if selected_iface already has entry in system configuration | ||||
|         configured_iface = None | ||||
|         for adapter in interfaces.adapters: | ||||
|           item = adapter.export() | ||||
|           if item['name'] == selected_iface: | ||||
|             configured_iface = item | ||||
|             break | ||||
|  | ||||
|         # if it was already configured, remove from configured adapter list to be replaced by the new settings | ||||
|         if configured_iface is not None: | ||||
|           interfaces.removeAdapterByName(selected_iface) | ||||
|  | ||||
|         # static, dynamic, or unassigned IP address? | ||||
|         code, tag = d.menu(Constants.MSG_SELECT_SOURCE, choices=[(Constants.STATIC, 'Static IP (recommended)'), (Constants.DHCP, 'Dynamic IP'), (Constants.UNASSIGNED, 'No IP')]) | ||||
|         if code != Dialog.OK: | ||||
|           raise CancelledError | ||||
|  | ||||
|         if tag == Constants.DHCP: | ||||
|           # DHCP ########################################################## | ||||
|           code = d.infobox(Constants.MSG_MESSAGE_DHCP) | ||||
|  | ||||
|           interfaces.addAdapter({ | ||||
|             'name': selected_iface, | ||||
|             'auto': True, | ||||
|             'hotplug': True, | ||||
|             'addrFam': 'inet', | ||||
|             'source': Constants.DHCP}, 0) | ||||
|  | ||||
|           write_and_display_results(interfaces, selected_iface) | ||||
|  | ||||
|         elif tag == Constants.UNASSIGNED: | ||||
|           # unassigned (but up) ########################################### | ||||
|           code = d.infobox(Constants.MSG_MESSAGE_UNASSIGNED) | ||||
|  | ||||
|           interfaces.addAdapter({ | ||||
|             'name': selected_iface, | ||||
|             'auto': True, | ||||
|             'hotplug': True, | ||||
|             'addrFam': 'inet', | ||||
|             'source': Constants.UNASSIGNED, | ||||
|             'pre-up': 'ip link set dev $IFACE up', | ||||
|             'post-up': '/usr/local/bin/nic-capture-setup.sh $IFACE', | ||||
|             'post-down': 'ip link set dev $IFACE down'}, 0) | ||||
|  | ||||
|           write_and_display_results(interfaces, selected_iface) | ||||
|  | ||||
|         elif tag == Constants.STATIC: | ||||
|           # static ######################################################## | ||||
|  | ||||
|           # see if the adapter currently has an IP address, use it as a starting suggestion | ||||
|           try: | ||||
|             previous_ip = netifaces.ifaddresses(selected_iface)[netifaces.AF_INET][0]['addr'] | ||||
|             previous_gw = '.'.join(previous_ip.split('.')[0:3] + ['1']) | ||||
|           except Exception as e: | ||||
|             code = d.msgbox(text=Constants.MSG_MESSAGE_ERROR.format(e)) | ||||
|             previous_ip = "192.168.0.10" | ||||
|             previous_gw = "192.168.0.1" | ||||
|           if previous_ip.startswith('172.'): | ||||
|             previous_mask = "255.255.0.0" | ||||
|           elif previous_ip.startswith('10.'): | ||||
|             previous_mask = "255.0.0.0" | ||||
|           else: | ||||
|             previous_mask = "255.255.255.0" | ||||
|  | ||||
|           while True: | ||||
|             code, values = d.form(Constants.MSG_CONFIG_STATIC_TITLE, [ | ||||
|                                   # title, row_1, column_1, field, row_1, column_20, field_length, input_length | ||||
|                                   ('IP Address', 1, 1, previous_ip, 1, 20, 15, 15), | ||||
|                                   # title, row_2, column_1, field, row_2, column_20, field_length, input_length | ||||
|                                   ('Netmask', 2, 1, previous_mask, 2, 20, 15, 15), | ||||
|                                   # title, row_3, column_1, field, row_3, column_20, field_length, input_length | ||||
|                                   ('Gateway', 3, 1, previous_gw, 3, 20, 15, 15) | ||||
|                                   ]) | ||||
|             values = [x.strip() for x in values] | ||||
|  | ||||
|             if (code == Dialog.CANCEL or code == Dialog.ESC): | ||||
|               raise CancelledError | ||||
|  | ||||
|             elif (len(values[0]) <= 0) or (len(values[1]) <= 0) or (len(values[2]) <= 0): | ||||
|               code = d.msgbox(text=Constants.MSG_MESSAGE_ERROR.format("Invalid value(s), please try again")) | ||||
|  | ||||
|             else: | ||||
|               code = d.infobox(Constants.MSG_MESSAGE_STATIC) | ||||
|  | ||||
|               interfaces.addAdapter({ | ||||
|                 'name': selected_iface, | ||||
|                 'auto': True, | ||||
|                 'hotplug': True, | ||||
|                 'addrFam': 'inet', | ||||
|                 'source': Constants.STATIC, | ||||
|                 'address': values[0], | ||||
|                 'netmask': values[1], | ||||
|                 'gateway': values[2]}, 0) | ||||
|  | ||||
|               write_and_display_results(interfaces, selected_iface) | ||||
|               break | ||||
|  | ||||
|     except CancelledError as c: | ||||
|       # d.msgbox(text=Constants.MSG_CANCEL_ERROR) | ||||
|       # just start over | ||||
|       continue | ||||
|  | ||||
|     except Exception as e: | ||||
|       d.msgbox(text=Constants.MSG_MESSAGE_ERROR.format(e)) | ||||
|       raise | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|   main() | ||||
|   clearquit() | ||||
							
								
								
									
15  Vagrant/resources/malcolm/shared/bin/docker-load-wait.sh  Executable file
							| @@ -0,0 +1,15 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| function finish { | ||||
|   pkill -f "zenity.*Preparing Malcolm" | ||||
| } | ||||
|  | ||||
| if [[ -f /malcolm_images.tar.gz ]] || pgrep -f "docker load" >/dev/null 2>&1 || pgrep -f "docker-untar" >/dev/null 2>&1; then | ||||
|   trap finish EXIT | ||||
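|   # zenity --progress reads updates on stdin, so piping the endless output of `yes` | ||||
|   # into it keeps the pulsating dialog alive until the trap above kills it on exit | ||||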
|   yes | zenity --progress --pulsate --no-cancel --auto-close --text "Malcolm Docker images are loading, please wait..." --title "Preparing Malcolm" & | ||||
|   while [[ -f /malcolm_images.tar.gz ]] || pgrep -f "docker load" >/dev/null 2>&1 || pgrep -f "docker-untar" >/dev/null 2>&1; do | ||||
|     sleep 2 | ||||
|   done | ||||
| fi | ||||
							
								
								
									
51  Vagrant/resources/malcolm/shared/bin/docker-uid-gid-setup.sh  Executable file
							| @@ -0,0 +1,51 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| set -e | ||||
|  | ||||
| unset ENTRYPOINT_CMD | ||||
| unset ENTRYPOINT_ARGS | ||||
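| # treat the first positional argument as the command to exec and any remaining | ||||
| # arguments as its argument list | ||||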
| [ "$#" -ge 1 ] && ENTRYPOINT_CMD="$1" && [ "$#" -gt 1 ] && shift 1 && ENTRYPOINT_ARGS=( "$@" ) | ||||
|  | ||||
| # modify the UID/GID for the default user/group (for example, 1000 -> 1001) | ||||
| usermod --non-unique --uid ${PUID:-${DEFAULT_UID}} ${PUSER} | ||||
| groupmod --non-unique --gid ${PGID:-${DEFAULT_GID}} ${PGROUP} | ||||
|  | ||||
| # change user/group ownership of any files/directories belonging to the original IDs | ||||
| if [[ -n ${PUID} ]] && [[ "${PUID}" != "${DEFAULT_UID}" ]]; then | ||||
|   find / -path /sys -prune -o -path /proc -prune -o -user ${DEFAULT_UID} -exec chown -f ${PUID} "{}" \; || true | ||||
| fi | ||||
| if [[ -n ${PGID} ]] && [[ "${PGID}" != "${DEFAULT_GID}" ]]; then | ||||
|   find / -path /sys -prune -o -path /proc -prune -o -group ${DEFAULT_GID} -exec chown -f :${PGID} "{}" \; || true | ||||
| fi | ||||
|  | ||||
| # if there are semicolon-separated PUSER_CHOWN entries explicitly specified, chown them too | ||||
| if [[ -n ${PUSER_CHOWN} ]]; then | ||||
|   IFS=';' read -ra ENTITIES <<< "${PUSER_CHOWN}" | ||||
|   for ENTITY in "${ENTITIES[@]}"; do | ||||
|     chown -R ${PUSER}:${PGROUP} "${ENTITY}" || true | ||||
|   done | ||||
| fi | ||||
|  | ||||
| # determine if we are now dropping privileges to exec ENTRYPOINT_CMD | ||||
| if [[ "$PUSER_PRIV_DROP" == "true" ]]; then | ||||
|   EXEC_USER="${PUSER}" | ||||
|   USER_HOME="$(getent passwd ${PUSER} | cut -d: -f6)" | ||||
| else | ||||
|   EXEC_USER="${USER:-root}" | ||||
|   USER_HOME="${HOME:-/root}" | ||||
| fi | ||||
|  | ||||
| # execute the entrypoint command specified | ||||
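| # (the unquoted heredoc is expanded by this outer shell, so the variables below are | ||||
| # substituted before su's shell runs; printf "%q " re-quotes each argument so words | ||||
| # with spaces or special characters survive that second round of parsing) | ||||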
| su --shell /bin/bash --preserve-environment ${EXEC_USER} << EOF | ||||
| export USER="${EXEC_USER}" | ||||
| export HOME="${USER_HOME}" | ||||
| whoami | ||||
| id | ||||
| if [ ! -z "${ENTRYPOINT_CMD}" ]; then | ||||
|   if [ -z "${ENTRYPOINT_ARGS}" ]; then | ||||
|     "${ENTRYPOINT_CMD}" | ||||
|   else | ||||
|     "${ENTRYPOINT_CMD}" $(printf "%q " "${ENTRYPOINT_ARGS[@]}") | ||||
|   fi | ||||
| fi | ||||
| EOF | ||||
							
								
								
									
21  Vagrant/resources/malcolm/shared/bin/dod-login-banner.sh  Executable file
							| @@ -0,0 +1,21 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # The operating system must display the Standard Mandatory DoD Notice and Consent Banner before granting local or remote access to the system. | ||||
| #  V-56585 / SV-70845r1_rule | ||||
| # https://www.stigviewer.com/stig/general_purpose_operating_system_srg/2015-06-26/finding/V-56585 | ||||
|  | ||||
| BANNER_FILE="$(mktemp)" | ||||
|  | ||||
| cat << 'EOF' > "$BANNER_FILE" | ||||
| You are accessing a U.S. Government (USG) Information System (IS) that is provided for USG-authorized use only. | ||||
| By using this IS (which includes any device attached to this IS), you consent to the following conditions: | ||||
| -The USG routinely intercepts and monitors communications on this IS for purposes including, but not limited to, penetration testing, COMSEC monitoring, network operations and defense, personnel misconduct (PM), law enforcement (LE), and counterintelligence (CI) investigations. | ||||
| -At any time, the USG may inspect and seize data stored on this IS. | ||||
| -Communications using, or data stored on, this IS are not private, are subject to routine monitoring, interception, and search, and may be disclosed or used for any USG-authorized purpose. | ||||
| -This IS includes security measures (e.g., authentication and access controls) to protect USG interests--not for your personal benefit or privacy. | ||||
| -Notwithstanding the above, using this IS does not constitute consent to PM, LE or CI investigative searching or monitoring of the content of privileged communications, or work product, related to personal representation or services by attorneys, psychotherapists, or clergy, and their assistants. Such communications and work product are private and confidential. See User Agreement for details. | ||||
| EOF | ||||
|  | ||||
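| # show the banner; if the user dismisses it without consenting (zenity exits non-zero), | ||||
| # terminate the graphical session (lxsession) to deny access | ||||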
| zenity --text-info --title "U.S. DoD Notice and Consent" --filename="$BANNER_FILE" || pkill -SIGTERM -f lxsession | ||||
|  | ||||
| rm -f "$BANNER_FILE" | ||||
							
								
								
									
188  Vagrant/resources/malcolm/shared/bin/elastic_index_size_prune.py  Executable file
							| @@ -0,0 +1,188 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| import argparse | ||||
| import humanfriendly | ||||
| import json | ||||
| import re | ||||
| import requests | ||||
| import os | ||||
| import sys | ||||
|  | ||||
| ################################################################################################### | ||||
| debug = False | ||||
| scriptName = os.path.basename(__file__) | ||||
| scriptPath = os.path.dirname(os.path.realpath(__file__)) | ||||
|  | ||||
| ################################################################################################### | ||||
| # print to stderr | ||||
| def eprint(*args, **kwargs): | ||||
|   print(*args, file=sys.stderr, **kwargs) | ||||
|  | ||||
| ################################################################################################### | ||||
| # convenient boolean argument parsing | ||||
| def str2bool(v): | ||||
|   if v.lower() in ('yes', 'true', 't', 'y', '1'): | ||||
|     return True | ||||
|   elif v.lower() in ('no', 'false', 'f', 'n', '0'): | ||||
|     return False | ||||
|   else: | ||||
|     raise argparse.ArgumentTypeError('Boolean value expected.') | ||||
|  | ||||
| ################################################################################################### | ||||
| # main | ||||
| def main(): | ||||
|   global debug | ||||
|  | ||||
|   parser = argparse.ArgumentParser(description=scriptName, add_help=True, usage='{} <arguments>'.format(scriptName)) | ||||
|   parser.add_argument('-v', '--verbose', dest='debug', type=str2bool, nargs='?', const=True, default=str2bool(os.getenv('ELASTICSEARCH_INDEX_SIZE_PRUNE_DEBUG', default='False')), help="Verbose output") | ||||
|   parser.add_argument('-i', '--index', dest='index', metavar='<str>', type=str, default=os.getenv('ELASTICSEARCH_INDEX_SIZE_PRUNE_INDEX', 'sessions2-*'), help='Index pattern') | ||||
|   parser.add_argument('-e', '--elastic', dest='elasticUrl', metavar='<protocol://host:port>', type=str, default=os.getenv('ELASTICSEARCH_URL', 'http://elasticsearch:9200'), help='Elasticsearch URL') | ||||
|   parser.add_argument('--node', dest='node', metavar='<str>', type=str, default=os.getenv('ELASTICSEARCH_INDEX_SIZE_PRUNE_NODE', ''), help='Node IDs or names') | ||||
|   parser.add_argument('-l', '--limit', dest='limit', metavar='<str>', type=str, default=os.getenv('ELASTICSEARCH_INDEX_SIZE_PRUNE_LIMIT', '0'), help='Index pattern size limit (e.g., 100gb, 25%%, ...)') | ||||
|   parser.add_argument('-n', '--dry-run', dest='dryrun', type=str2bool, nargs='?', const=True, default=str2bool(os.getenv('ELASTICSEARCH_INDEX_SIZE_PRUNE_DRY_RUN', default='False')), help="Dry run") | ||||
|   parser.add_argument('-p', '--primary', dest='primaryTotals', type=str2bool, nargs='?', const=True, default=str2bool(os.getenv('ELASTICSEARCH_INDEX_SIZE_PRUNE_PRIMARY', default='False')), help="Perform totals based on primaries (vs. totals)") | ||||
|   parser.add_argument('--name-sort', dest='nameSorted', type=str2bool, nargs='?', const=True, default=str2bool(os.getenv('ELASTICSEARCH_INDEX_SIZE_PRUNE_NAME_SORT', default='False')), help="Sort indices by name (vs. creation date)") | ||||
|   try: | ||||
|     parser.error = parser.exit | ||||
|     args = parser.parse_args() | ||||
|   except Exception as e: | ||||
|     parser.print_help() | ||||
|     exit(2) | ||||
|  | ||||
|   debug = args.debug | ||||
|   if debug: | ||||
|     eprint(os.path.join(scriptPath, scriptName)) | ||||
|     eprint("Arguments: {}".format(sys.argv[1:])) | ||||
|     eprint("Arguments: {}".format(args)) | ||||
|   else: | ||||
|     sys.tracebacklimit = 0 | ||||
|  | ||||
|   # short-circuit without printing anything else | ||||
|   if (args.limit == '0'): | ||||
|     return | ||||
|  | ||||
|   esInfoResponse = requests.get(args.elasticUrl) | ||||
|   esInfo = esInfoResponse.json() | ||||
|   elasticVersion = esInfo['version']['number'] | ||||
|   if debug: | ||||
|     eprint(f'Elasticsearch version is {elasticVersion}') | ||||
|  | ||||
|   totalIndices = 0 | ||||
|   limitMegabytes = None | ||||
|   limitPercent = None | ||||
|   if args.limit is not None: | ||||
|     if args.limit.isdigit(): | ||||
|       # assume megabytes | ||||
|       limitMegabytes = int(args.limit) | ||||
|     elif re.match(r'^\d+(\.\d+)?\s*[kmgtp]?b?$', args.limit, flags=re.IGNORECASE): | ||||
|       # parse human-friendly entered size | ||||
|       limitMegabytes = humanfriendly.parse_size(f"{args.limit}{'' if args.limit.lower().endswith('b') else 'b'}") // 1000000 | ||||
|     elif args.limit.endswith('%'): | ||||
|       # percentage (must calculate megabytes based on /_cat/allocation below) | ||||
|       limitPercent = int(args.limit[:-1]) | ||||
|       if (limitPercent <= 0) or (limitPercent >= 100): | ||||
|         raise Exception(f'Invalid limit percentage {args.limit}') | ||||
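|   # e.g. (hypothetical values): '500' -> 500 MB; '100gb' -> 100000 MB via humanfriendly; '25%' -> computed below from disk.total | ||||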
|  | ||||
|   if (limitPercent is not None): | ||||
|  | ||||
|     # get allocation statistics for node(s) to do percentage calculation | ||||
|     esDiskUsageStats = [] | ||||
|     esInfoResponse = requests.get(f'{args.elasticUrl}/_cat/allocation{f"/{args.node}" if args.node else ""}?format=json') | ||||
|     esInfo = esInfoResponse.json() | ||||
|  | ||||
|     # normalize allocation statistics' sizes (e.g., 100mb) into bytes | ||||
|     if (len(esInfo) > 0): | ||||
|       for stat in esInfo: | ||||
|         if ('node' in stat) and (stat['node'] != 'UNASSIGNED'): | ||||
|           esDiskUsageStats.append({key:humanfriendly.parse_size(value) if re.match(r'^\d+(\.\d+)?\s*[kmgtp]?b$', value, flags=re.IGNORECASE) else value for (key,value) in stat.items()}) | ||||
|  | ||||
|     if debug: | ||||
|       eprint(json.dumps(esDiskUsageStats)) | ||||
|  | ||||
|     # esDiskUsageStats should now look like: | ||||
|     # [ | ||||
|     #     { | ||||
|     #         "shards": "17", | ||||
|     #         "disk.indices": 14500000, | ||||
|     #         "disk.used": 148400000000, | ||||
|     #         "disk.avail": 1600000000000, | ||||
|     #         "disk.total": 1800000000000, | ||||
|     #         "disk.percent": "7", | ||||
|     #         "host": "172.22.2.3", | ||||
|     #         "ip": "172.22.2.3", | ||||
|     #         "node": "elasticsearch" | ||||
|     #     }, | ||||
|     #     ... | ||||
|     # ] | ||||
|     if (len(esDiskUsageStats) != 1): | ||||
|       raise Exception(f'Unable to determine node, please specify --node if using a percentage limit') | ||||
|     elif ('disk.total' not in esDiskUsageStats[0]): | ||||
|       raise Exception(f'Unable to determine disk.total for {esDiskUsageStats[0]["node"] if "node" in esDiskUsageStats[0] else args.node}') | ||||
|     limitMegabytes = int(float(esDiskUsageStats[0]['disk.total']) * (float(limitPercent) / 100.0)) // 1000000 | ||||
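|     # e.g. with the sample stats above: 1800000000000 bytes * 0.25 // 1000000 = 450000 MB | ||||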
|  | ||||
|   if (limitMegabytes is None) or (limitMegabytes <= 0): | ||||
|     raise Exception(f'Invalid (or unable to calculate) limit megabytes from {args.limit}') | ||||
|  | ||||
|   # now the limit has been calculated and stored (as megabytes) in limitMegabytes | ||||
|   if debug: | ||||
|     eprint(f'Index limit for {args.index} is {humanfriendly.format_size(humanfriendly.parse_size(f"{limitMegabytes}mb"))}') | ||||
|  | ||||
|   # now determine the total size of the indices from the index pattern | ||||
|   esInfoResponse = requests.get(f'{args.elasticUrl}/{args.index}/_stats/store') | ||||
|   esInfo = esInfoResponse.json() | ||||
|   try: | ||||
|     totalSizeInMegabytes = esInfo['_all']['primaries' if args.primaryTotals else 'total']['store']['size_in_bytes'] // 1000000 | ||||
|     totalIndices = len(esInfo["indices"]) | ||||
|   except Exception as e: | ||||
|     raise Exception(f'Error getting {args.index} size_in_bytes: {e}') | ||||
|   if debug: | ||||
|     eprint(f'Total {args.index} size is {humanfriendly.format_size(humanfriendly.parse_size(f"{totalSizeInMegabytes}mb"))}') | ||||
|  | ||||
|   if (totalSizeInMegabytes > limitMegabytes): | ||||
|     # the indices have outgrown their bounds, we need to delete the oldest | ||||
|  | ||||
|     if debug: | ||||
|       eprint(f'{totalIndices} {args.index} indices occupy {humanfriendly.format_size(humanfriendly.parse_size(f"{totalSizeInMegabytes}mb"))} ({humanfriendly.format_size(humanfriendly.parse_size(f"{limitMegabytes}mb"))} allowed)') | ||||
|  | ||||
|     # get list of indexes in index pattern and sort by creation date | ||||
|     esInfoResponse = requests.get(f'{args.elasticUrl}/_cat/indices/{args.index}', | ||||
|                                   params={'format':'json', | ||||
|                                           'h':'i,id,status,health,rep,creation.date,pri.store.size,store.size'}) | ||||
|     esInfo = sorted(esInfoResponse.json(), key=lambda k: k['i' if args.nameSorted else 'creation.date']) | ||||
|     totalIndices = len(esInfo) | ||||
|  | ||||
|     # determine how many megabytes need to be deleted and which of the oldest indices will cover that | ||||
|     indicesToDelete = [] | ||||
|     needsDeletedMb = totalSizeInMegabytes-limitMegabytes | ||||
|     sizeKey = 'pri.store.size' if args.primaryTotals else 'store.size' | ||||
|     for index in esInfo: | ||||
|       indexSizeMb = humanfriendly.parse_size(index[sizeKey]) // 1000000 | ||||
|       if (needsDeletedMb > 0): | ||||
|         indicesToDelete.append(index) | ||||
|         needsDeletedMb = needsDeletedMb-indexSizeMb | ||||
|       else: | ||||
|         break | ||||
|  | ||||
|     if (len(indicesToDelete) > 0): | ||||
|       # we've determined we can free up space from the index pattern | ||||
|       print(f'{"Would delete" if args.dryrun else "Deleting"} {humanfriendly.format_size(humanfriendly.parse_size(f"{sum([humanfriendly.parse_size(index[sizeKey]) // 1000000 for index in indicesToDelete])}mb"))} in {len(indicesToDelete)} indices ({indicesToDelete[0]["i"]} to {indicesToDelete[-1]["i"]} ordered by {"name" if args.nameSorted else "creation date"})') | ||||
|  | ||||
|       if not args.dryrun: | ||||
|         # delete the indices to free up the space indicated | ||||
|         for index in indicesToDelete: | ||||
|           esDeleteResponse = requests.delete(f'{args.elasticUrl}/{index["i"]}') | ||||
|           print(f'DELETE {index["i"]} ({humanfriendly.format_size(humanfriendly.parse_size(index[sizeKey]))}): {requests.status_codes._codes[esDeleteResponse.status_code][0]}') | ||||
|  | ||||
|     else: | ||||
|       # no indexes to delete | ||||
|       print(f'Nothing to do: could not determine list of {args.index} indices to delete') | ||||
|  | ||||
|   else: | ||||
|     # we haven't hit the limit, nothing to do | ||||
|     print(f'Nothing to do: {totalIndices} {args.index} indices occupy {humanfriendly.format_size(humanfriendly.parse_size(f"{totalSizeInMegabytes}mb"))} of {humanfriendly.format_size(humanfriendly.parse_size(f"{limitMegabytes}mb"))} allowed') | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|   main() | ||||
							
								
								
									
						
									
										98
									
								
								Vagrant/resources/malcolm/shared/bin/elastic_search_status.sh
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,98 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| set -e | ||||
|  | ||||
| ENCODING="utf-8" | ||||
|  | ||||
| # options | ||||
| # -v      (verbose) | ||||
| # | ||||
| # -e url  (Elasticsearch URL, e.g., http://elasticsearch:9200) | ||||
| # OR | ||||
| # -i ip   (Elasticsearch ip) | ||||
| # -p port (Elasticsearch port) | ||||
| # | ||||
| # -w      (wait not only for "up" status, but also wait for actual sessions2-* logs to exist) | ||||
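| # | ||||
| # example invocation (hypothetical): elastic_search_status.sh -e http://elasticsearch:9200 -w | ||||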
|  | ||||
| ES_URL= | ||||
| WAIT_FOR_LOG_DATA=0 | ||||
| while getopts 've:i:p:w' OPTION; do | ||||
|   case "$OPTION" in | ||||
|     v) | ||||
|       set -x | ||||
|       ;; | ||||
|  | ||||
|     e) | ||||
|       ES_URL="$OPTARG" | ||||
|       ;; | ||||
|  | ||||
|     i) | ||||
|       ES_HOST="$OPTARG" | ||||
|       ;; | ||||
|  | ||||
|     p) | ||||
|       ES_PORT="$OPTARG" | ||||
|       ;; | ||||
|  | ||||
|     w) | ||||
|       WAIT_FOR_LOG_DATA=1 | ||||
|       ;; | ||||
|  | ||||
|     ?) | ||||
|       echo "script usage: $(basename $0) [-v] [-e <Elasticsearch URL>] [-w]" >&2 | ||||
|       exit 1 | ||||
|       ;; | ||||
|   esac | ||||
| done | ||||
| shift "$(($OPTIND -1))" | ||||
|  | ||||
| if [[ -z $ES_URL ]]; then | ||||
|   if [[ -n $ELASTICSEARCH_URL ]]; then | ||||
|     ES_URL="$ELASTICSEARCH_URL" | ||||
|   elif [[ -n $ES_HOST ]] && [[ -n $ES_PORT ]]; then | ||||
|     ES_URL="http://$ES_HOST:$ES_PORT" | ||||
|   else | ||||
|     ES_URL="http://elasticsearch:9200" | ||||
|   fi | ||||
| fi | ||||
|  | ||||
|  | ||||
| # wait for the ES HTTP server to respond at all | ||||
| until $(curl --output /dev/null --silent --head --fail "$ES_URL"); do | ||||
|   # printf '.' >&2 | ||||
|   sleep 1 | ||||
| done | ||||
|  | ||||
| # now wait for the HTTP "Ok" response | ||||
| until [ "$(curl --write-out %{http_code} --silent --output /dev/null "$ES_URL")" = "200" ]; do | ||||
|   # printf '-' >&2 | ||||
|   sleep 1 | ||||
| done | ||||
|  | ||||
| # next wait for ES status to turn to green or yellow | ||||
| until [[ "$(curl -fsSL "$ES_URL/_cat/health?h=status" | sed -r 's/^[[:space:]]+|[[:space:]]+$//g')" =~ ^(yellow|green)$ ]]; do | ||||
|   # printf '+' >&2 | ||||
|   sleep 1 | ||||
| done | ||||
|  | ||||
| echo "Elasticsearch is up and healthy at "$ES_URL"" >&2 | ||||
|  | ||||
| if (( $WAIT_FOR_LOG_DATA == 1 )); then | ||||
|   sleep 1 | ||||
|  | ||||
|   echo "Waiting until Elasticsearch has logs..." >&2 | ||||
|  | ||||
|   # wait until at least one sessions2-* index exists | ||||
|   until (( $(curl -fs -H'Content-Type: application/json' -XGET "$ES_URL/_cat/indices/sessions2-*" 2>/dev/null | wc -l) > 0 )) ; do | ||||
|     sleep 5 | ||||
|   done | ||||
|   echo "Log indices exist." >&2 | ||||
|  | ||||
|   # wait until at least one record with @timestamp exists | ||||
|   until curl -fs -H'Content-Type: application/json' -XPOST "$ES_URL/sessions2-*/_search" -d'{ "sort": { "@timestamp" : "desc" }, "size" : 1 }' >/dev/null 2>&1 ; do | ||||
|     sleep 5 | ||||
|   done | ||||
|   echo "Logs exist." >&2 | ||||
| fi | ||||
							
								
								
									
						
									
										122
									
								
								Vagrant/resources/malcolm/shared/bin/fstab.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,122 @@ | ||||
| #!/usr/bin/env python | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| # fstab interpreter | ||||
|  | ||||
| import os | ||||
|  | ||||
| class Fstab: | ||||
|     """This class extends file in order to implement a file reader/writer | ||||
|     for file `/etc/fstab` | ||||
|     """ | ||||
|  | ||||
|     class Entry(object): | ||||
|         """Entry class represents a non-comment line on the `/etc/fstab` file | ||||
|         """ | ||||
|         def __init__(self, device, mountpoint, filesystem, | ||||
|                      options, fs_freq=0, fs_passno=0): | ||||
|             self.device = device | ||||
|             self.mountpoint = mountpoint | ||||
|             self.filesystem = filesystem | ||||
|  | ||||
|             if not options: | ||||
|                 options = "defaults" | ||||
|  | ||||
|             self.options = options | ||||
|             self.fs_freq = fs_freq | ||||
|             self.fs_passno = fs_passno | ||||
|  | ||||
|         def __eq__(self, o): | ||||
|             return str(self) == str(o) | ||||
|  | ||||
|         def __str__(self): | ||||
|             return "{} {} {} {} {} {}".format(self.device, | ||||
|                                               self.mountpoint, | ||||
|                                               self.filesystem, | ||||
|                                               self.options, | ||||
|                                               self.fs_freq, | ||||
|                                               self.fs_passno) | ||||
|  | ||||
|     DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') | ||||
|  | ||||
|     def __init__(self, path=None): | ||||
|         if path: | ||||
|             self._path = path | ||||
|         else: | ||||
|             self._path = self.DEFAULT_PATH | ||||
|         self.f = open(self._path, 'r+') | ||||
|  | ||||
|     def __enter__ (self): | ||||
|         return self.f | ||||
|  | ||||
|     def __exit__ (self, exc_type, exc_value, traceback): | ||||
|         self.f.close() | ||||
|  | ||||
|     def _hydrate_entry(self, line): | ||||
|         return Fstab.Entry(*[ x for x in line.replace("\t"," ").strip("\n").split(" ") | ||||
|                               if x not in ('', None) ]) | ||||
|  | ||||
|     @property | ||||
|     def entries(self): | ||||
|         self.f.seek(0) | ||||
|         for line in self.f.readlines(): | ||||
|             try: | ||||
|                 if not line.startswith("#"): | ||||
|                     yield self._hydrate_entry(line) | ||||
|             except ValueError: | ||||
|                 pass | ||||
|  | ||||
|     def get_entry_by_attr(self, attr, value): | ||||
|         for entry in self.entries: | ||||
|             e_attr = getattr(entry, attr) | ||||
|             if e_attr == value: | ||||
|                 return entry | ||||
|         return None | ||||
|  | ||||
|     def add_entry(self, entry): | ||||
|         if self.get_entry_by_attr('device', entry.device): | ||||
|             return False | ||||
|  | ||||
|         self.f.write(str(entry) + '\n') | ||||
|         self.f.truncate() | ||||
|         return entry | ||||
|  | ||||
|     def remove_entry(self, entry): | ||||
|         self.f.seek(0) | ||||
|  | ||||
|         lines = self.f.readlines() | ||||
|  | ||||
|         found = False | ||||
|         for index, line in enumerate(lines): | ||||
|             if not line.startswith("#"): | ||||
|                 if self._hydrate_entry(line) == entry: | ||||
|                     found = True | ||||
|                     break | ||||
|  | ||||
|         if not found: | ||||
|             return False | ||||
|  | ||||
|         lines.remove(line) | ||||
|  | ||||
|         self.f.seek(0) | ||||
|         self.f.write(''.join(lines)) | ||||
|         self.f.truncate() | ||||
|         return True | ||||
|  | ||||
|     @classmethod | ||||
|     def remove_by_mountpoint(cls, mountpoint, path=None): | ||||
|         fstab = cls(path=path) | ||||
|         entry = fstab.get_entry_by_attr('mountpoint', mountpoint) | ||||
|         if entry: | ||||
|             return fstab.remove_entry(entry) | ||||
|         return False | ||||
|  | ||||
|     @classmethod | ||||
|     def add(cls, device, mountpoint, filesystem, options=None, fs_freq=0, fs_passno=0, path=None): | ||||
|         return cls(path=path).add_entry(Fstab.Entry(device, | ||||
|                                                     mountpoint, filesystem, | ||||
|                                                     options=options, | ||||
|                                                     fs_freq=fs_freq, | ||||
|                                                     fs_passno=fs_passno)) | ||||
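|  | ||||
| # minimal usage sketch (hypothetical paths; operates on a test copy rather than the real /etc/fstab): | ||||
| #   Fstab.add('/dev/sdb1', '/mnt/data', 'ext4', options='defaults', path='/tmp/fstab.test') | ||||
| #   Fstab.remove_by_mountpoint('/mnt/data', path='/tmp/fstab.test') | ||||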
							
								
								
									
						
									
										150
									
								
								Vagrant/resources/malcolm/shared/bin/grassmarlin_translate.py
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,150 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| import argparse | ||||
| import os | ||||
| import sys | ||||
| import pprint | ||||
| import json | ||||
| import socket | ||||
| import xml.etree.ElementTree as ET | ||||
|  | ||||
| import mmguero | ||||
| from mmguero import eprint | ||||
|  | ||||
| from itertools import groupby | ||||
| from collections import defaultdict | ||||
| from collections import OrderedDict | ||||
|  | ||||
| ################################################################################################### | ||||
| args = None | ||||
| debug = False | ||||
| script_name = os.path.basename(__file__) | ||||
| script_path = os.path.dirname(os.path.realpath(__file__)) | ||||
| orig_path = os.getcwd() | ||||
|  | ||||
| ################################################################################################### | ||||
| IGNORE_FINGERPRINT_FILES = ("Operating System.xml", "OPC.xml") | ||||
| IGNORE_COMMON_PORTS = (21, 22, 53, 67, 68, 80, 443, 502, 8000, 8080) | ||||
|  | ||||
| ################################################################################################### | ||||
| # main | ||||
| def main(): | ||||
|   global args | ||||
|   global debug | ||||
|  | ||||
|   parser = argparse.ArgumentParser(description=script_name, add_help=False, usage='{} <arguments>'.format(script_name)) | ||||
|   parser.add_argument('-d', '--defaults', dest='accept_defaults', type=mmguero.str2bool, nargs='?', const=True, default=False, metavar='true|false', help="Accept defaults to prompts without user interaction") | ||||
|   parser.add_argument('-v', '--verbose', dest='debug', type=mmguero.str2bool, nargs='?', const=True, default=False, metavar='true|false', help="Verbose/debug output") | ||||
|   parser.add_argument(dest='input', metavar='<string>', type=str, nargs='+', help="Input file(s)") | ||||
|   try: | ||||
|     parser.error = parser.exit | ||||
|     args = parser.parse_args() | ||||
|   except SystemExit: | ||||
|     parser.print_help() | ||||
|     exit(2) | ||||
|  | ||||
|   debug = args.debug | ||||
|   if debug: | ||||
|     eprint(os.path.join(script_path, script_name)) | ||||
|     eprint("Arguments: {}".format(sys.argv[1:])) | ||||
|     eprint("Arguments: {}".format(args)) | ||||
|   else: | ||||
|     sys.tracebacklimit = 0 | ||||
|  | ||||
|   # map protocol numbers to lowercase names (e.g., 6 to 'tcp'), defaulting to 'unknown_transport' for unrecognized protocols | ||||
|   protomap = defaultdict(lambda: 'unknown_transport') | ||||
|   protomap.update({num:name[8:].lower() for name, num in vars(socket).items() if name.startswith("IPPROTO")}) | ||||
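|   # e.g. protomap[6] == 'tcp', protomap[17] == 'udp'; numbers with no IPPROTO_* name map to 'unknown_transport' | ||||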
|  | ||||
|   fingerprints = defaultdict(lambda: None) | ||||
|  | ||||
|   for fingerprintFile in args.input: | ||||
|  | ||||
|     fingerprint = defaultdict(lambda: None) | ||||
|     fingerprint['Payloads'] = {} | ||||
|  | ||||
|     if ((args.input is not None) and | ||||
|         os.path.isfile(fingerprintFile) and | ||||
|         (os.path.basename(fingerprintFile) not in IGNORE_FINGERPRINT_FILES)): | ||||
|  | ||||
|       root = ET.parse(fingerprintFile).getroot() | ||||
|       if (root.tag == 'Fingerprint'): | ||||
|  | ||||
|         if (header := root.find('Header')) is not None: | ||||
|           headerInfo = {} | ||||
|           for child in header: | ||||
|             headerInfo[child.tag] = ' '.join(child.text.split()) | ||||
|           fingerprint.update(headerInfo) | ||||
|  | ||||
|         for item in root.findall('./Payload'): | ||||
|           filterFor = item.attrib['For'].strip() if 'For' in item.attrib else None | ||||
|           if filterFor: | ||||
|             payloadInfo = defaultdict(lambda: None) if filterFor not in fingerprint['Payloads'] else fingerprint['Payloads'][filterFor] | ||||
|             payloadFilters = defaultdict(lambda: None) if payloadInfo['Filters'] is None else payloadInfo['Filters'] | ||||
|             if (descriptionItem := item.find('./Description')) is not None: | ||||
|               payloadInfo['Description'] = ' '.join(descriptionItem.text.split()) | ||||
|             details = defaultdict(lambda: '-') | ||||
|             if (returnItem := item.find('./Always/Return')) is not None: | ||||
|               payloadInfo.update(returnItem.attrib) | ||||
|               if (detailsItem := returnItem.find('./Details')) is not None: | ||||
|                 if (categoryItem := detailsItem.find('./Category')) is not None: | ||||
|                   details['Category'] = categoryItem.text | ||||
|                 if (roleItem := detailsItem.find('./Role')) is not None: | ||||
|                   details['Role'] = roleItem.text | ||||
|                 for detailItem in detailsItem.findall('./Detail'): | ||||
|                   detailName = detailItem.attrib['Name'] if 'Name' in detailItem.attrib else None | ||||
|                   if detailName: | ||||
|                     details[detailName] = detailItem.text | ||||
|             payloadInfo['Filters'] = payloadFilters | ||||
|             payloadInfo['Details'] = details | ||||
|             fingerprint['Payloads'][filterFor] = payloadInfo | ||||
|  | ||||
|         for item in root.findall('./Filter'): | ||||
|           filterFor = item.attrib['For'].strip() if 'For' in item.attrib else None | ||||
|           if filterFor in fingerprint['Payloads']: | ||||
|             filterName = item.attrib['Name'] if 'Name' in item.attrib else f"{len(fingerprint['Payloads'][filterFor]['Filters'])+1}" | ||||
|             filterDetails = defaultdict(lambda: '-') if filterName not in fingerprint['Payloads'][filterFor]['Filters'] else fingerprint['Payloads'][filterFor]['Filters'][filterName] | ||||
|             for child in item: | ||||
|               if child.text: | ||||
|                 filterDetails[child.tag] = int(child.text) if child.text.isdigit() else child.text | ||||
|               if child.attrib: | ||||
|                 filterDetails[child.tag] = child.attrib | ||||
|  | ||||
|             # we're going to filter out some very common traffic types here (modbus, basic HTTP, etc.) which would probably | ||||
|             # always be either redundant or a false positive | ||||
|             onlyCommonDst = ((filterDetails["DstPort"] in IGNORE_COMMON_PORTS) and (filterDetails["SrcPort"] in [filterDetails["DstPort"], '-'])) | ||||
|             onlyCommonSrc = ((filterDetails["SrcPort"] in IGNORE_COMMON_PORTS) and (filterDetails["DstPort"] in [filterDetails["SrcPort"], '-'])) | ||||
|             if onlyCommonDst: | ||||
|               del filterDetails["DstPort"] | ||||
|             if onlyCommonSrc: | ||||
|               del filterDetails["SrcPort"] | ||||
|  | ||||
|             fingerprint['Payloads'][filterFor]['Filters'][filterName] = filterDetails | ||||
|  | ||||
|       fingerprints[os.path.basename(fingerprintFile)] = fingerprint | ||||
|  | ||||
|   print('\t'.join(['#fields', 'proto', 'dport', 'sport', 'name', 'service', 'category', 'role'])) | ||||
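|   # each matching filter below becomes one tab-separated row, e.g. (hypothetical values): | ||||
|   #   tcp  44818  0  EtherNetIP  ethernetip  PLC  Server | ||||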
|   for filename, fingerprint in fingerprints.items(): | ||||
|     if "Payloads" in fingerprint: | ||||
|       for name, payload in fingerprint["Payloads"].items(): | ||||
|         if "Filters" in payload: | ||||
|           for filtername, filters in payload["Filters"].items(): | ||||
|             # need to have at least one port to guess, protocol isn't enough by itself | ||||
|             dstPort = filters["DstPort"] if (filters["DstPort"] != '-') else 0 | ||||
|             srcPort = filters["SrcPort"] if (filters["SrcPort"] != '-') else 0 | ||||
|             if (dstPort != 0) or (srcPort != 0): | ||||
|               nameItems = [x for x in list(OrderedDict.fromkeys(" ".join([fingerprint["Name"], name, filtername]).split())) if x.lower() not in ["dst", "src", "dstport", "srcport", "default"]] | ||||
|               zeekItems = [protomap[filters["TransportProtocol"]], | ||||
|                            dstPort, | ||||
|                            srcPort, | ||||
|                            " ".join(nameItems), | ||||
|                            payload["Details"]["ICSProtocol"], | ||||
|                            payload["Details"]["Category"], | ||||
|                            payload["Details"]["Role"]] | ||||
|               print('\t'.join(map(str,zeekItems))) | ||||
|  | ||||
| ################################################################################################### | ||||
| if __name__ == '__main__': | ||||
|   main() | ||||
							
								
								
									
						
									
										45
									
								
								Vagrant/resources/malcolm/shared/bin/jdk-cacerts-auto-import.sh
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,45 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| # determine the location of the cacerts file we're adding to | ||||
|  | ||||
| JDK_DIR="$(find /usr -type d -name jdk | head -n 1)" | ||||
|  | ||||
| CACERTS_FILE="$JDK_DIR"/lib/security/cacerts | ||||
| KEYTOOL_BIN="$JDK_DIR"/bin/keytool | ||||
|  | ||||
| if [[ ! -f "$CACERTS_FILE" ]] || [[ ! -x "$KEYTOOL_BIN" ]]; then | ||||
|   echo "Unable to locate cacerts and/or keytool " >&2 | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| unset TRUSTED_CA_DIR | ||||
| TRUSTED_CA_DIRNAME=${CA_DIR:-"ca-trust"} | ||||
| CA_DIR_PARENTS=( | ||||
|   "$JDK_DIR"/../"$TRUSTED_CA_DIRNAME" | ||||
|   /etc/"$TRUSTED_CA_DIRNAME" | ||||
|   /opt/"$TRUSTED_CA_DIRNAME" | ||||
|   /"$TRUSTED_CA_DIRNAME" | ||||
| ) | ||||
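|  | ||||
| # search the candidate directories in order; e.g. with CA_DIR unset, the first existing of | ||||
| # "$JDK_DIR/../ca-trust", /etc/ca-trust, /opt/ca-trust, or /ca-trust is used | ||||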
| for i in "${CA_DIR_PARENTS[@]}"; do | ||||
|   TMP_DIR="$(realpath "$i")" | ||||
|   if [[ -d "$TMP_DIR" ]]; then | ||||
|     TRUSTED_CA_DIR="$TMP_DIR" | ||||
|     break | ||||
|   fi | ||||
| done | ||||
|  | ||||
| if [[ -z $TRUSTED_CA_DIR ]] || [[ ! -d "$TRUSTED_CA_DIR" ]]; then | ||||
|   echo "Unable to locate directory containing trusted CA certificates" >&2 | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| echo | ||||
| find "$TRUSTED_CA_DIR" -type f -print0 | while read -d $'\0' CRT_FILE; do | ||||
|   CRT_FILE_BASE="$(basename "$CRT_FILE" | sed 's/\.[^.]*$//')" | ||||
|   if [[ -n $CRT_FILE_BASE ]] && [[ "$CRT_FILE_BASE" != \.* ]] ; then | ||||
|     echo "Importing \"$CRT_FILE_BASE\"... " | ||||
|     ( "$KEYTOOL_BIN" -importcert -cacerts -trustcacerts -file "$CRT_FILE" -alias "$CRT_FILE_BASE" -keypass changeit -storepass changeit -noprompt 2>&1 | grep -Pv "(already exists)" ) || true | ||||
|     "$KEYTOOL_BIN" -list -cacerts -alias "$CRT_FILE_BASE" -keypass changeit -storepass changeit -noprompt | ||||
|     echo | ||||
|   fi | ||||
| done | ||||
							
								
								
									
						
									
										177
									
								
								Vagrant/resources/malcolm/shared/bin/malass_client.py
									
									
									
									
									
										Normal file
									
								
							| @@ -0,0 +1,177 @@ | ||||
| #!/usr/bin/env python3 | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| """This script (malass_client.py) simulates the Malass 'file upload' HTML web form. | ||||
|    The 'file upload' HTML form is used to upload a file, along with several HTML form fields, | ||||
|    to the Malass web server for scanning. | ||||
|  | ||||
|    Author:  Brett Rasmussen | ||||
|    Date :   Mar 20, 2013 | ||||
|    Revised: Mar  8, 2019 | ||||
|    Revised: May  2, 2019 (Seth Grover) | ||||
| """ | ||||
|  | ||||
| import requests | ||||
| import sys | ||||
|  | ||||
| ################################################################################ | ||||
| ################################################################################ | ||||
| def parse_transaction_id(http_response_page): | ||||
|   """ Parse the Malass transaction_id value from the passed in 'http_response_page'. | ||||
|  | ||||
|     http_response_page: (in) Web page returned by the Malass web site, in response | ||||
|                              to a file upload operation. | ||||
|     Return ok        (Boolean); | ||||
|            error_msg (Error description on error); | ||||
|            trans_id (The malass transaction_ID number.); | ||||
|   """ | ||||
|   lpzProc = sys._getframe().f_code.co_name | ||||
|  | ||||
|   # A "Server Transaction ID #:" field might look like the following: | ||||
|   # | ||||
|   #    <td>Server Transaction ID #:</td><td><input name="trans_id" type="text" value="663"></td> | ||||
|   # | ||||
|  | ||||
|   target_str_1 = "<td>Server Transaction ID #:" | ||||
|   start_idx = http_response_page.find(target_str_1) | ||||
|   pattern_found = (start_idx != -1) | ||||
|   if (not pattern_found): | ||||
|     return (False, f"{lpzProc}: Error: Could not find (1st) target_str={target_str_1}", "") | ||||
|  | ||||
|   target_str_2 = "value=" | ||||
|   start_idx_2 = http_response_page.find(target_str_2, start_idx) | ||||
|   pattern_found = (start_idx_2 != -1) | ||||
|   if (not pattern_found): | ||||
|     return (False, f"{lpzProc}: Error: Could not find (2nd) target_str={target_str_2}", "") | ||||
|  | ||||
|   target_str_3 = '"' | ||||
|   start_idx_3 = http_response_page.find(target_str_3, start_idx_2) | ||||
|   pattern_found = (start_idx_3 != -1) | ||||
|   if (not pattern_found): | ||||
|     return (False, f"{lpzProc}: Error: Could not find (3rd) target_str={target_str_3}", "") | ||||
|  | ||||
|   trans_id_start_idx = start_idx_3 + 1 | ||||
|   target_str_4 = '"' | ||||
|   start_idx_4 = http_response_page.find(target_str_4, trans_id_start_idx) | ||||
|   pattern_found = (start_idx_4 != -1) | ||||
|   if (not pattern_found): | ||||
|     return (False, f"{lpzProc}: Error: Could not find (4th) target_str={target_str_4}", "") | ||||
|  | ||||
|   trans_id = http_response_page[trans_id_start_idx:start_idx_4] | ||||
|   return (True, "", trans_id) | ||||
|  | ||||
| ################################################################################ | ||||
| def post_multipart(url, fields={}, files={}): | ||||
|   """ | ||||
|   Post fields and files to a host as HTTP MIME multipart/form-data. | ||||
|     url    - The URL for the POST request | ||||
|     fields - a dictionary of form fields, e.g.: {'Upload_Button' : 'Upload File'}, | ||||
|     files  - a dictionary of files, e.g.: {'file_1' : open('foobar.bin', 'rb')} | ||||
|  | ||||
|   Return the | ||||
|     http response code; | ||||
|     http response msg; | ||||
|     http response page headers; | ||||
|     http server's response page. | ||||
|   """ | ||||
|   lpzProc = sys._getframe().f_code.co_name | ||||
|  | ||||
|   parts = dict() | ||||
|   parts.update(fields) | ||||
|   parts.update(files) | ||||
|  | ||||
|   response = requests.post(url, files=parts) | ||||
|  | ||||
|   return (response.status_code, requests.status_codes._codes[response.status_code][0], response.headers, response.text) | ||||
|  | ||||
| ################################################################################ | ||||
| def upload_file_to_malass(upload_file_path,  web_server_ip="127.0.0.1",  web_server_port="80"): | ||||
|   """ Upload a file to the Malass web server, so that it may be scanned by | ||||
|       the Malass application server. | ||||
|  | ||||
|       upload_file_path - (in) Full path of (local) file to upload to the Malass web site | ||||
|                               (e.g. /tmp/my_image.jpeg ) | ||||
|       web_server_ip - (in) IP address of Malass web server.  Defaults to 127.0.0.1 | ||||
|                            (i.e. localhost) | ||||
|       web_server_port  - (in) Web server port.  Defaults to port 80 | ||||
|  | ||||
|     Returns: ok (Boolean); | ||||
|              transaction_id (File upload transaction #); | ||||
|              http_response_page/error_msg (Returned error page OR an Error description msg) | ||||
|   """ | ||||
|   lpzProc = sys._getframe().f_code.co_name | ||||
|  | ||||
|   with open(upload_file_path, "rb") as upload_file_handle: | ||||
|     error_code, error_msg1, headers, resp_str = post_multipart(url=f"http://{web_server_ip}:{web_server_port}/cgi-bin/file_upload.py", | ||||
|                                                                fields={'Upload_Button' : 'Upload File'}, | ||||
|                                                                files={'file_1' : upload_file_handle}) | ||||
|  | ||||
|   null_trans_id     = "" | ||||
|   trans_id          = "" | ||||
|   http_response_page = f"http response code={error_code}\nhttp_response_msg={error_msg1}\nhttp_headers=\n{headers}\nhttp_response_page=\n{resp_str}\n" | ||||
|  | ||||
|   if (error_code == 200): | ||||
|     # Successful HTTP 'POST' operation: | ||||
|     # Parse the 'transaction ID' from the http response: | ||||
|     ok, error_msg, trans_id = parse_transaction_id(http_response_page) | ||||
|  | ||||
|     if (not ok): | ||||
|       not_ok = False | ||||
|       return (not_ok, null_trans_id, (f"{http_response_page}\n[Error parsing 'transaction ID' value.]")) | ||||
|  | ||||
|   else: | ||||
|     not_ok = False | ||||
|     return (not_ok, null_trans_id, (f"{http_response_page}\n[Error: Unexpected HTTP response code={error_code}.]")) | ||||
|  | ||||
|   return (True, trans_id, http_response_page) | ||||
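|  | ||||
| # e.g. (hypothetical): ok, trans_id, page = upload_file_to_malass('/tmp/sample.bin', '127.0.0.1', '80') | ||||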
|  | ||||
|  | ||||
| ################################################################################ | ||||
| def query_av_summary_rpt(transaction_id,              uploaded_file_name="", | ||||
|                          web_server_ip="127.0.0.1",   web_server_port="80"): | ||||
|  | ||||
|   """ Query the 'AV summary report', for the specified | ||||
|         'server transaction_id' OR | ||||
|         'uploaded_file_name' | ||||
|       value. | ||||
|  | ||||
|       (If a transaction_id is supplied, then the 'uploaded_file_name' field | ||||
|       should be left blank.) | ||||
|  | ||||
|       (Note: you may also specify 'part of a filename' in the uploaded_file_name field. | ||||
|       The most recently submitted, (matching) uploaded file transaction, will be | ||||
|       returned.) | ||||
|  | ||||
|       Note: This routine connects to the Malass web server (rather than the | ||||
|         Malass transaction server.) | ||||
|  | ||||
|  | ||||
|       transaction_id     - (in) A Malass server transaction ID number. (or an | ||||
|                                 empty string) | ||||
|  | ||||
|       uploaded_file_name - (in) The 'base' name of a recently uploaded file. | ||||
|                                 (You may also submit part of a filename in this | ||||
|                                 parameter.) (Or, an empty string.) | ||||
|  | ||||
|       web_server_ip -     (in)  Malass web server IP address.  Defaults to | ||||
|                                 127.0.0.1 (i.e. localhost) | ||||
|       web_server_port  - (in) Web server port.  Defaults to port 80. | ||||
|  | ||||
|     Returns ok (Boolean); | ||||
|             error_msg: (Error description string) | ||||
|             av_summary_rpt_str (Current contents of the av_summary_rpt.txt file, as a | ||||
|                                 single string) | ||||
|    """ | ||||
|   lpzProc = sys._getframe().f_code.co_name | ||||
|  | ||||
|   error_code, error_msg1, headers, resp_str = post_multipart(url=f"http://{web_server_ip}:{web_server_port}/cgi-bin/query_av_summary_rpt.py", | ||||
|                                                              fields={'trans_id' : transaction_id, 'uploaded_filename' : uploaded_file_name}) | ||||
|  | ||||
|   #print "\nerror_code=%s\n"  %  error_code | ||||
|   #print "\nerror_msg1=%s\n"  %  error_msg1 | ||||
|   #print "\nheaders=%s\n"     %  headers | ||||
|   #print "\nresponse_str=%s"  %  resp_str | ||||
|  | ||||
|   new_av_summary_rpt_str = resp_str | ||||
|   return (True, "",  new_av_summary_rpt_str) | ||||
							
								
								
									
						
									
										15
									
								
								Vagrant/resources/malcolm/shared/bin/nic-capture-setup.sh
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,15 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| IFACE_NAME="$1" | ||||
|  | ||||
| if [[ -n "$IFACE_NAME" ]]; then | ||||
|   # disable NIC feature offloading | ||||
|   /sbin/ethtool -K "$IFACE_NAME" rx off tx off sg off tso off ufo off gso off gro off lro off | ||||
|  | ||||
|   # increase ring buffer sizes to maximum (may increase latency, but maximize throughput) | ||||
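|   # ("ethtool -g" prints the "Pre-set maximums" block first, so the first RX:/TX: lines hold the maxima, e.g. RX: 4096 / TX: 4096) | ||||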
|   MAX_BUFFER_SIZES=($(/sbin/ethtool -g "$IFACE_NAME" | grep -E "^(RX|TX):" | head -n 2 | awk '{print $2}')) | ||||
|   if ((${#MAX_BUFFER_SIZES[@]} == 2)); then | ||||
|     /sbin/ethtool -G "$IFACE_NAME" rx ${MAX_BUFFER_SIZES[0]} tx ${MAX_BUFFER_SIZES[1]} | ||||
|   fi | ||||
|  | ||||
| fi | ||||
							
								
								
									
						
									
										339
									
								
								Vagrant/resources/malcolm/shared/bin/pcap_moloch_and_zeek_processor.py
									
									
									
									
									
										Executable file
									
								
							| @@ -0,0 +1,339 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| ################################################################################################### | ||||
| # Process queued files reported by pcap_watcher.py, using either moloch-capture or zeek to process | ||||
| # them for session creation and logging into the Elasticsearch database | ||||
| # | ||||
| # Run the script with --help for options | ||||
| ################################################################################################### | ||||
|  | ||||
| import argparse | ||||
| import json | ||||
| import os | ||||
| import shutil | ||||
| import signal | ||||
| import sys | ||||
| import tarfile | ||||
| import tempfile | ||||
| import time | ||||
| import zmq | ||||
|  | ||||
| from pcap_utils import * | ||||
| from multiprocessing.pool import ThreadPool | ||||
| from collections import deque | ||||
| from itertools import chain, repeat | ||||
|  | ||||
| ################################################################################################### | ||||
| MAX_WORKER_PROCESSES_DEFAULT = 1 | ||||
|  | ||||
| PCAP_PROCESSING_MODE_ARKIME = "moloch" | ||||
| PCAP_PROCESSING_MODE_ZEEK = "zeek" | ||||
|  | ||||
| ARKIME_CAPTURE_PATH = "/data/moloch/bin/moloch-capture" | ||||
|  | ||||
| ZEEK_PATH = "/opt/zeek/bin/zeek" | ||||
| ZEEK_EXTRACTOR_MODE_INTERESTING = 'interesting' | ||||
| ZEEK_EXTRACTOR_MODE_MAPPED = 'mapped' | ||||
| ZEEK_EXTRACTOR_MODE_NONE = 'none' | ||||
| ZEEK_EXTRACTOR_SCRIPT = "extractor.zeek" | ||||
| ZEEK_EXTRACTOR_SCRIPT_INTERESTING = "extractor_override.interesting.zeek" | ||||
| ZEEK_LOCAL_SCRIPT = 'local' | ||||
| ZEEK_STATE_DIR = '.state' | ||||
| ZEEK_AUTOZEEK_TAG = 'AUTOZEEK' | ||||
| ZEEK_AUTOCARVE_TAG_PREFIX = 'AUTOCARVE' | ||||
| ZEEK_EXTRACTOR_MODE_ENV_VAR = 'ZEEK_EXTRACTOR_MODE' | ||||
| ZEEK_LOG_COMPRESSION_LEVEL = 6 | ||||
|  | ||||
| ################################################################################################### | ||||
| debug = False | ||||
| verboseDebug = False | ||||
| debugToggled = False | ||||
| pdbFlagged = False | ||||
| args = None | ||||
| scriptName = os.path.basename(__file__) | ||||
| scriptPath = os.path.dirname(os.path.realpath(__file__)) | ||||
| origPath = os.getcwd() | ||||
| shuttingDown = False | ||||
| scanWorkersCount = AtomicInt(value=0) | ||||
|  | ||||
| ################################################################################################### | ||||
| # handle sigint/sigterm and set a global shutdown variable | ||||
| def shutdown_handler(signum, frame): | ||||
|   global shuttingDown | ||||
|   shuttingDown = True | ||||
|  | ||||
| ################################################################################################### | ||||
| # handle sigusr1 for a pdb breakpoint | ||||
| def pdb_handler(sig, frame): | ||||
|   global pdbFlagged | ||||
|   pdbFlagged = True | ||||
|  | ||||
| ################################################################################################### | ||||
| # handle sigusr2 for toggling debug | ||||
| def debug_toggle_handler(signum, frame): | ||||
|   global debug | ||||
|   global debugToggled | ||||
|   debug = not debug | ||||
|   debugToggled = True | ||||
|  | ||||
| ################################################################################################### | ||||
| def molochCaptureFileWorker(molochWorkerArgs): | ||||
|   global debug | ||||
|   global verboseDebug | ||||
|   global shuttingDown | ||||
|   global scanWorkersCount | ||||
|  | ||||
|   scanWorkerId = scanWorkersCount.increment() # unique ID for this thread | ||||
|  | ||||
|   newFileQueue, pcapBaseDir, molochBin, autotag, notLocked = molochWorkerArgs | ||||
|  | ||||
|   if debug: eprint(f"{scriptName}[{scanWorkerId}]:\tstarted") | ||||
|  | ||||
|   # loop forever, or until we're told to shut down | ||||
|   while not shuttingDown: | ||||
|     try: | ||||
|       # pull an item from the queue of files that need to be processed | ||||
|       fileInfo = newFileQueue.popleft() | ||||
|     except IndexError: | ||||
|       time.sleep(1) | ||||
|     else: | ||||
|       if isinstance(fileInfo, dict) and (FILE_INFO_DICT_NAME in fileInfo): | ||||
|  | ||||
|         if pcapBaseDir and os.path.isdir(pcapBaseDir): | ||||
|           fileInfo[FILE_INFO_DICT_NAME] = os.path.join(pcapBaseDir, fileInfo[FILE_INFO_DICT_NAME]) | ||||
|  | ||||
|         if os.path.isfile(fileInfo[FILE_INFO_DICT_NAME]): | ||||
|           # finalize tags list | ||||
|           fileInfo[FILE_INFO_DICT_TAGS] = [x for x in fileInfo[FILE_INFO_DICT_TAGS] if (x != ZEEK_AUTOZEEK_TAG) and (not x.startswith(ZEEK_AUTOCARVE_TAG_PREFIX))] if ((FILE_INFO_DICT_TAGS in fileInfo) and autotag) else list() | ||||
|           if debug: eprint(f"{scriptName}[{scanWorkerId}]:\t🔎\t{fileInfo}") | ||||
|  | ||||
|           # put together moloch execution command | ||||
|           cmd = [molochBin, '--quiet', '-r', fileInfo[FILE_INFO_DICT_NAME]] | ||||
|           if notLocked: cmd.append('--nolockpcap') | ||||
|           cmd.extend(list(chain.from_iterable(zip(repeat('-t'), fileInfo[FILE_INFO_DICT_TAGS])))) | ||||
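|           # e.g. (hypothetical): /data/moloch/bin/moloch-capture --quiet -r /data/pcap/foo.pcap -t tag1 -t tag2 | ||||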
|  | ||||
|           # execute moloch-capture for pcap file | ||||
|           retcode, output = run_process(cmd, debug=verboseDebug) | ||||
|           if (retcode == 0): | ||||
|             if debug: eprint(f"{scriptName}[{scanWorkerId}]:\t✅\t{os.path.basename(fileInfo[FILE_INFO_DICT_NAME])}") | ||||
|           else: | ||||
|             if debug: eprint(f"{scriptName}[{scanWorkerId}]:\t❗\t{molochBin} {os.path.basename(fileInfo[FILE_INFO_DICT_NAME])} returned {retcode} {output if verboseDebug else ''}") | ||||
|  | ||||
|  | ||||
|   if debug: eprint(f"{scriptName}[{scanWorkerId}]:\tfinished") | ||||
|  | ||||
| ################################################################################################### | ||||
| def zeekFileWorker(zeekWorkerArgs): | ||||
|   global debug | ||||
|   global verboseDebug | ||||
|   global shuttingDown | ||||
|   global scanWorkersCount | ||||
|  | ||||
|   scanWorkerId = scanWorkersCount.increment() # unique ID for this thread | ||||
|  | ||||
|   newFileQueue, pcapBaseDir, zeekBin, autozeek, autotag, uploadDir, defaultExtractFileMode = zeekWorkerArgs | ||||
|  | ||||
|   if debug: eprint(f"{scriptName}[{scanWorkerId}]:\tstarted") | ||||
|  | ||||
|   # loop forever, or until we're told to shut down | ||||
|   while not shuttingDown: | ||||
|     try: | ||||
|       # pull an item from the queue of files that need to be processed | ||||
|       fileInfo = newFileQueue.popleft() | ||||
|     except IndexError: | ||||
|       time.sleep(1) | ||||
|     else: | ||||
|       if isinstance(fileInfo, dict) and (FILE_INFO_DICT_NAME in fileInfo) and os.path.isdir(uploadDir): | ||||
|  | ||||
|         if pcapBaseDir and os.path.isdir(pcapBaseDir): | ||||
|           fileInfo[FILE_INFO_DICT_NAME] = os.path.join(pcapBaseDir, fileInfo[FILE_INFO_DICT_NAME]) | ||||
|  | ||||
|         if os.path.isfile(fileInfo[FILE_INFO_DICT_NAME]): | ||||
|           # zeek this PCAP if it's tagged "AUTOZEEK" or if the global autozeek flag is turned on | ||||
|           if autozeek or ((FILE_INFO_DICT_TAGS in fileInfo) and ZEEK_AUTOZEEK_TAG in fileInfo[FILE_INFO_DICT_TAGS]): | ||||
|  | ||||
|             extractFileMode = defaultExtractFileMode | ||||
|  | ||||
|             # if file carving was specified via tag, make note of it | ||||
|             if (FILE_INFO_DICT_TAGS in fileInfo): | ||||
|               for autocarveTag in filter(lambda x: x.startswith(ZEEK_AUTOCARVE_TAG_PREFIX), fileInfo[FILE_INFO_DICT_TAGS]): | ||||
|                 fileInfo[FILE_INFO_DICT_TAGS].remove(autocarveTag) | ||||
|                 extractFileMode = autocarveTag[len(ZEEK_AUTOCARVE_TAG_PREFIX):] | ||||
|  | ||||
|             extractFileMode = extractFileMode.lower() if extractFileMode else ZEEK_EXTRACTOR_MODE_NONE | ||||
|  | ||||
|             # finalize tags list (removing AUTOZEEK and AUTOCARVE*) | ||||
|             fileInfo[FILE_INFO_DICT_TAGS] = [x for x in fileInfo[FILE_INFO_DICT_TAGS] if (x != ZEEK_AUTOZEEK_TAG) and (not x.startswith(ZEEK_AUTOCARVE_TAG_PREFIX))] if ((FILE_INFO_DICT_TAGS in fileInfo) and autotag) else list() | ||||
|             if debug: eprint(f"{scriptName}[{scanWorkerId}]:\t🔎\t{fileInfo}") | ||||
|  | ||||
|             # create a temporary work directory where zeek will be executed to generate the log files | ||||
|             with tempfile.TemporaryDirectory() as tmpLogDir: | ||||
|               if os.path.isdir(tmpLogDir): | ||||
|  | ||||
|                 processTimeUsec = int(round(time.time() * 1000000)) | ||||
|  | ||||
|                 # use Zeek to process the pcap | ||||
|                 zeekCmd = [zeekBin, "-r", fileInfo[FILE_INFO_DICT_NAME], ZEEK_LOCAL_SCRIPT] | ||||
|  | ||||
|                 # set file extraction parameters if required | ||||
|                 if (extractFileMode != ZEEK_EXTRACTOR_MODE_NONE): | ||||
|                   zeekCmd.append(ZEEK_EXTRACTOR_SCRIPT) | ||||
|                   if (extractFileMode == ZEEK_EXTRACTOR_MODE_INTERESTING): | ||||
|                     zeekCmd.append(ZEEK_EXTRACTOR_SCRIPT_INTERESTING) | ||||
|                     extractFileMode = ZEEK_EXTRACTOR_MODE_MAPPED | ||||
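|                 # e.g. (hypothetical): zeek -r /data/pcap/foo.pcap local extractor.zeek extractor_override.interesting.zeek | ||||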
|  | ||||
|                 # execute zeek with the cwd of tmpLogDir so that's where the logs go, and with the updated file carving environment variable | ||||
|                 zeekEnv = os.environ.copy() | ||||
|                 zeekEnv[ZEEK_EXTRACTOR_MODE_ENV_VAR] = extractFileMode | ||||
|                 retcode, output = run_process(zeekCmd, cwd=tmpLogDir, env=zeekEnv, debug=verboseDebug) | ||||
|                 if (retcode == 0): | ||||
|                   if debug: eprint(f"{scriptName}[{scanWorkerId}]:\t✅\t{os.path.basename(fileInfo[FILE_INFO_DICT_NAME])}") | ||||
|                 else: | ||||
|                   if debug: eprint(f"{scriptName}[{scanWorkerId}]:\t❗\t{zeekBin} {os.path.basename(fileInfo[FILE_INFO_DICT_NAME])} returned {retcode} {output if verboseDebug else ''}") | ||||
|  | ||||
|                 # clean up the .state directory we don't care to keep | ||||
|                 tmpStateDir = os.path.join(tmpLogDir, ZEEK_STATE_DIR) | ||||
|                 if os.path.isdir(tmpStateDir): shutil.rmtree(tmpStateDir) | ||||
|  | ||||
|                 # make sure log files were generated | ||||
|                 logFiles = [logFile for logFile in os.listdir(tmpLogDir) if logFile.endswith('.log')] | ||||
|                 if (len(logFiles) > 0): | ||||
|  | ||||
|                   # tar up the results | ||||
|                   tgzFileName = os.path.join(tmpLogDir, "{}-{}-{}.tar.gz".format(os.path.basename(fileInfo[FILE_INFO_DICT_NAME]), '_'.join(fileInfo[FILE_INFO_DICT_TAGS]), processTimeUsec)) | ||||
|                   with tarfile.open(tgzFileName, mode="w:gz", compresslevel=ZEEK_LOG_COMPRESSION_LEVEL) as tar: | ||||
|                     tar.add(tmpLogDir, arcname=os.path.basename('.')) | ||||
|  | ||||
|                   # relocate the tarball to the upload directory (do it this way instead of with a shutil.move because of | ||||
|                   # the way Docker volume mounts work, ie. avoid "OSError: [Errno 18] Invalid cross-device link"). | ||||
|                   # we don't have to explicitly delete it since this whole directory is about to leave context and be removed | ||||
|                   shutil.copy(tgzFileName, uploadDir) | ||||
|                   if verboseDebug: eprint(f"{scriptName}[{scanWorkerId}]:\t⏩\t{tgzFileName} → {uploadDir}") | ||||
|  | ||||
|                 else: | ||||
|                   # zeek returned no log files (or an error) | ||||
|                   if debug: eprint(f"{scriptName}[{scanWorkerId}]:\t❓\t{zeekBin} {os.path.basename(fileInfo[FILE_INFO_DICT_NAME])} generated no log files") | ||||
|  | ||||
|               else: | ||||
|                 if debug: eprint(f"{scriptName}[{scanWorkerId}]:\t❗\terror creating temporary directory {tmpLogDir}") | ||||
|  | ||||
|  | ||||
|   if debug: eprint(f"{scriptName}[{scanWorkerId}]:\tfinished") | ||||
|  | ||||
|  | ||||
| ################################################################################################### | ||||
| # main | ||||
| def main(): | ||||
|  | ||||
|   processingMode = None | ||||
|   if (PCAP_PROCESSING_MODE_ARKIME in scriptName) and (PCAP_PROCESSING_MODE_ZEEK in scriptName): | ||||
|     eprint(f"{scriptName} could not determine PCAP processing mode. Create a symlink to {scriptName} with either '{PCAP_PROCESSING_MODE_ARKIME}' or '{PCAP_PROCESSING_MODE_ZEEK}' in the name and run that instead.") | ||||
|     exit(2) | ||||
|   elif (PCAP_PROCESSING_MODE_ARKIME in scriptName): | ||||
|     processingMode = PCAP_PROCESSING_MODE_ARKIME | ||||
|   elif (PCAP_PROCESSING_MODE_ZEEK in scriptName): | ||||
|     processingMode = PCAP_PROCESSING_MODE_ZEEK | ||||
|   else: | ||||
|     eprint(f"{scriptName} could not determine PCAP processing mode. Create a symlink to {scriptName} with either '{PCAP_PROCESSING_MODE_ARKIME}' or '{PCAP_PROCESSING_MODE_ZEEK}' in the name and run that instead.") | ||||
|     exit(2) | ||||
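|   # e.g. symlinks named pcap_moloch_processor.py and pcap_zeek_processor.py (hypothetical names) would select the two modes | ||||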
|  | ||||
|   global args | ||||
|   global debug | ||||
|   global debugToggled | ||||
|   global pdbFlagged | ||||
|   global shuttingDown | ||||
|   global verboseDebug | ||||
|  | ||||
|   parser = argparse.ArgumentParser(description=scriptName, add_help=False, usage='{} <arguments>'.format(scriptName)) | ||||
|   parser.add_argument('-v', '--verbose', dest='debug', help="Verbose output", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   parser.add_argument('--extra-verbose', dest='verboseDebug', help="Super verbose output", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   parser.add_argument('--start-sleep', dest='startSleepSec', help="Sleep for this many seconds before starting", metavar='<seconds>', type=int, default=0, required=False) | ||||
|   parser.add_argument('-t', '--threads', dest='threads', help="Worker threads", metavar='<count>', type=int, default=MAX_WORKER_PROCESSES_DEFAULT, required=False) | ||||
|   parser.add_argument('--publisher', required=True, dest='publisherHost', help="host publishing PCAP events", metavar='<STR>', type=str, default="127.0.0.1") | ||||
|   parser.add_argument('--autotag', dest='autotag', help="Autotag logs based on PCAP file names", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   requiredNamed = parser.add_argument_group('required arguments') | ||||
|   requiredNamed.add_argument('--pcap-directory', dest='pcapBaseDir', help='Base directory for PCAP files', metavar='<directory>', type=str, required=True) | ||||
|   if (processingMode == PCAP_PROCESSING_MODE_ARKIME): | ||||
|     parser.add_argument('--moloch', required=False, dest='executable', help="moloch-capture executable path", metavar='<STR>', type=str, default=ARKIME_CAPTURE_PATH) | ||||
|     parser.add_argument('--managed', dest='notLocked', help="Allow Arkime to manage PCAP files", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   elif (processingMode == PCAP_PROCESSING_MODE_ZEEK): | ||||
|     parser.add_argument('--zeek', required=False, dest='executable', help="zeek executable path", metavar='<STR>', type=str, default=ZEEK_PATH) | ||||
|     parser.add_argument('--autozeek', dest='autozeek', help="Autoanalyze all PCAP files with Zeek", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|     parser.add_argument('--extract', dest='zeekExtractFileMode', help='Zeek file carving mode', metavar=f'{ZEEK_EXTRACTOR_MODE_INTERESTING}|{ZEEK_EXTRACTOR_MODE_MAPPED}|{ZEEK_EXTRACTOR_MODE_NONE}', type=str, default=ZEEK_EXTRACTOR_MODE_NONE) | ||||
|     requiredNamed.add_argument('--zeek-directory', dest='zeekUploadDir', help='Destination directory for Zeek log files', metavar='<directory>', type=str, required=True) | ||||
|   try: | ||||
|     parser.error = parser.exit | ||||
|     args = parser.parse_args() | ||||
|   except SystemExit: | ||||
|     parser.print_help() | ||||
|     exit(2) | ||||
|  | ||||
|   verboseDebug = args.verboseDebug | ||||
|   debug = args.debug or verboseDebug | ||||
|   if debug: | ||||
|     eprint(os.path.join(scriptPath, scriptName)) | ||||
|     eprint("{} arguments: {}".format(scriptName, sys.argv[1:])) | ||||
|     eprint("{} arguments: {}".format(scriptName, args)) | ||||
|   else: | ||||
|     sys.tracebacklimit = 0 | ||||
|  | ||||
|   # handle sigint and sigterm for graceful shutdown | ||||
|   signal.signal(signal.SIGINT, shutdown_handler) | ||||
|   signal.signal(signal.SIGTERM, shutdown_handler) | ||||
|   signal.signal(signal.SIGUSR1, pdb_handler) | ||||
|   signal.signal(signal.SIGUSR2, debug_toggle_handler) | ||||
|  | ||||
|   # sleep for a bit if requested | ||||
|   sleepCount = 0 | ||||
|   while (not shuttingDown) and (sleepCount < args.startSleepSec): | ||||
|     time.sleep(1) | ||||
|     sleepCount += 1 | ||||
|  | ||||
|   # initialize ZeroMQ context and socket(s) to receive filenames and send scan results | ||||
|   context = zmq.Context() | ||||
|  | ||||
|   # Socket to subscribe to messages on | ||||
|   new_files_socket = context.socket(zmq.SUB) | ||||
|   new_files_socket.connect(f"tcp://{args.publisherHost}:{PCAP_TOPIC_PORT}") | ||||
|   new_files_socket.setsockopt(zmq.SUBSCRIBE, b"")  # All topics | ||||
|   new_files_socket.setsockopt(zmq.LINGER, 0)       # don't linger on unsent messages at close | ||||
|   new_files_socket.RCVTIMEO = 1500 | ||||
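|   # the 1500 ms receive timeout lets the main loop below wake up regularly to check for shutdown | ||||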
|   if debug: eprint(f"{scriptName}:\tsubscribed to topic at {PCAP_TOPIC_PORT}") | ||||
|  | ||||
|   # we'll pull from the topic in the main thread and queue them for processing by the worker threads | ||||
|   newFileQueue = deque() | ||||
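|   # (deque.append and deque.popleft are atomic in CPython, so no extra lock is needed to share this with the workers) | ||||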
|  | ||||
|   # start worker threads which will pull filenames/tags from the queue and process them with moloch-capture or zeek | ||||
|   if (processingMode == PCAP_PROCESSING_MODE_ARKIME): | ||||
|     scannerThreads = ThreadPool(args.threads, molochCaptureFileWorker, ([newFileQueue,args.pcapBaseDir,args.executable,args.autotag,args.notLocked],)) | ||||
|   elif (processingMode == PCAP_PROCESSING_MODE_ZEEK): | ||||
|     scannerThreads = ThreadPool(args.threads, zeekFileWorker, ([newFileQueue,args.pcapBaseDir,args.executable,args.autozeek,args.autotag,args.zeekUploadDir,args.zeekExtractFileMode],)) | ||||
|  | ||||
|   while (not shuttingDown): | ||||
|     # for debugging | ||||
|     if pdbFlagged: | ||||
|       pdbFlagged = False | ||||
|       breakpoint() | ||||
|  | ||||
|     # accept a file info dict from new_files_socket as json | ||||
|     try: | ||||
|       fileInfo = json.loads(new_files_socket.recv_string()) | ||||
|     except zmq.Again as timeout: | ||||
|       # no file received due to timeout, we'll go around and try again | ||||
|       if verboseDebug: eprint(f"{scriptName}:\t🕑\t(recv)") | ||||
|       fileInfo = None | ||||
|  | ||||
|     if isinstance(fileInfo, dict) and (FILE_INFO_DICT_NAME in fileInfo): | ||||
|       # queue the file for one of the worker threads to process | ||||
|       newFileQueue.append(fileInfo) | ||||
|       if debug: eprint(f"{scriptName}:\t📨\t{fileInfo}") | ||||
|  | ||||
|   # graceful shutdown | ||||
|   if debug: eprint(f"{scriptName}: shutting down...") | ||||
|   time.sleep(5)    # brief pause to let the worker threads wind down | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|   main() | ||||
							
								
								
									
137 Vagrant/resources/malcolm/shared/bin/pcap_utils.py (Normal file)
| @@ -0,0 +1,137 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| import argparse  # needed for str2bool's ArgumentTypeError | ||||
| import os | ||||
| import re | ||||
| import sys | ||||
|  | ||||
| from subprocess import (PIPE, Popen) | ||||
| from multiprocessing import RawValue | ||||
| from threading import Lock | ||||
|  | ||||
| ################################################################################################### | ||||
| PCAP_TOPIC_PORT = 30441 | ||||
|  | ||||
| PCAP_MIME_TYPES = ['application/vnd.tcpdump.pcap', 'application/x-pcapng'] | ||||
|  | ||||
| FILE_INFO_DICT_NAME = "name" | ||||
| FILE_INFO_DICT_TAGS = "tags" | ||||
| FILE_INFO_DICT_SIZE = "size" | ||||
| FILE_INFO_FILE_TYPE = "type" | ||||
| FILE_INFO_FILE_MIME = "mime" | ||||
|  | ||||
| ################################################################################################### | ||||
| # print to stderr | ||||
| def eprint(*args, **kwargs): | ||||
|   print(*args, file=sys.stderr, **kwargs) | ||||
|   sys.stderr.flush() | ||||
|  | ||||
| ################################################################################################### | ||||
| # convenient boolean argument parsing | ||||
| def str2bool(v): | ||||
|   if v.lower() in ('yes', 'true', 't', 'y', '1'): | ||||
|     return True | ||||
|   elif v.lower() in ('no', 'false', 'f', 'n', '0'): | ||||
|     return False | ||||
|   else: | ||||
|     raise argparse.ArgumentTypeError('Boolean value expected.') | ||||
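| # e.g. str2bool('yes') -> True, str2bool('0') -> False; anything else raises ArgumentTypeError | ||||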
|  | ||||
| ################################################################################################### | ||||
| # strip a prefix from the beginning of a string if needed | ||||
| def remove_prefix(text, prefix): | ||||
|   if (len(prefix) > 0) and text.startswith(prefix): | ||||
|     return text[len(prefix):] | ||||
|   else: | ||||
|     return text | ||||
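| # e.g. (illustrative paths): remove_prefix('/data/pcap/upload/foo.pcap', '/data/pcap/') -> 'upload/foo.pcap' | ||||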
|  | ||||
| ################################################################################################### | ||||
| # open a file and close it, updating its access time | ||||
| def touch(filename): | ||||
|   open(filename, 'a').close() | ||||
|   os.utime(filename, None) | ||||
|  | ||||
| ################################################################################################### | ||||
| # run command with arguments and return its exit code, stdout, and stderr | ||||
| def check_output_input(*popenargs, **kwargs): | ||||
|  | ||||
|   if 'stdout' in kwargs: | ||||
|     raise ValueError('stdout argument not allowed, it will be overridden') | ||||
|  | ||||
|   if 'stderr' in kwargs: | ||||
|     raise ValueError('stderr argument not allowed, it will be overridden') | ||||
|  | ||||
|   if 'input' in kwargs and kwargs['input']: | ||||
|     if 'stdin' in kwargs: | ||||
|       raise ValueError('stdin and input arguments may not both be used') | ||||
|     inputdata = kwargs['input'] | ||||
|     kwargs['stdin'] = PIPE | ||||
|   else: | ||||
|     inputdata = None | ||||
|   kwargs.pop('input', None) | ||||
|  | ||||
|   process = Popen(*popenargs, stdout=PIPE, stderr=PIPE, **kwargs) | ||||
|   try: | ||||
|     output, errput = process.communicate(inputdata) | ||||
|   except: | ||||
|     process.kill() | ||||
|     process.wait() | ||||
|     raise | ||||
|  | ||||
|   retcode = process.poll() | ||||
|  | ||||
|   return retcode, output, errput | ||||
|  | ||||
| ################################################################################################### | ||||
| # run command with arguments and return its exit code and output | ||||
| def run_process(command, stdout=True, stderr=True, stdin=None, cwd=None, env=None, debug=False): | ||||
|  | ||||
|   retcode = -1 | ||||
|   output = [] | ||||
|  | ||||
|   try: | ||||
|     # run the command | ||||
|     retcode, cmdout, cmderr = check_output_input(command, input=stdin.encode() if stdin else None, cwd=cwd, env=env) | ||||
|  | ||||
|     # split the output on newlines to return a list | ||||
|     if stderr and (len(cmderr) > 0): output.extend(cmderr.decode(sys.getdefaultencoding()).split('\n')) | ||||
|     if stdout and (len(cmdout) > 0): output.extend(cmdout.decode(sys.getdefaultencoding()).split('\n')) | ||||
|  | ||||
|   except (FileNotFoundError, OSError, IOError) as e: | ||||
|     if stderr: | ||||
|       output.append("Command {} not found or unable to execute".format(command)) | ||||
|  | ||||
|   if debug: | ||||
|     # log the command, its (truncated) stdin if any, its exit code, and its output | ||||
|     eprint("{}{} returned {}: {}".format(command, "({})".format(stdin[:80] + bool(stdin[80:]) * '...') if stdin else "", retcode, output)) | ||||
|  | ||||
|   return retcode, output | ||||
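| # illustrative usage (assumes /bin/echo exists): | ||||
| #   run_process(['/bin/echo', 'hello']) -> (0, ['hello', ''])   (the trailing '' comes from splitting on the final newline) | ||||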
|  | ||||
| ################################################################################################### | ||||
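| # thread-safe integer counter: a shared ctypes RawValue guarded by a lock | ||||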
| class AtomicInt: | ||||
|   def __init__(self, value=0): | ||||
|     self.val = RawValue('i', value) | ||||
|     self.lock = Lock() | ||||
|  | ||||
|   def increment(self): | ||||
|     with self.lock: | ||||
|       self.val.value += 1 | ||||
|       return self.val.value | ||||
|  | ||||
|   def decrement(self): | ||||
|     with self.lock: | ||||
|       self.val.value -= 1 | ||||
|       return self.val.value | ||||
|  | ||||
|   def value(self): | ||||
|     with self.lock: | ||||
|       return self.val.value | ||||
|  | ||||
| ################################################################################################### | ||||
| # split a PCAP filename up into tags | ||||
| def tags_from_filename(filespec): | ||||
|   # split tags on these characters (',', '/', '_', '.', and '-'; the dash is placed last in the class so it is literal) | ||||
|   tagSplitterRe = "[,/_.-]+" | ||||
|   # tags to ignore explicitly | ||||
|   regex = re.compile(r'^(\d+|p?cap|dmp|log|bro|zeek|tcpdump|netsniff)$', re.IGNORECASE) | ||||
|   return list(filter(lambda i: not regex.search(i), map(str.strip, filter(None, re.split(tagSplitterRe, filespec))))) | ||||
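| # illustrative example (hypothetical filename): | ||||
| #   tags_from_filename('site1_external-20210401.pcap') -> ['site1', 'external'] | ||||
| #   ('20210401' matches the all-digits pattern and 'pcap' the extension pattern, so both are dropped) | ||||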
							
								
								
									
322 Vagrant/resources/malcolm/shared/bin/pcap_watcher.py (Executable file)
| @@ -0,0 +1,322 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| ################################################################################################### | ||||
| # Monitor a directory for PCAP files for processing (by publishing their filenames to a ZMQ socket) | ||||
| # | ||||
| # Run the script with --help for options | ||||
| ################################################################################################### | ||||
|  | ||||
| import argparse | ||||
| import glob | ||||
| import json | ||||
| import logging | ||||
| import magic | ||||
| import os | ||||
| import pathlib | ||||
| import pyinotify | ||||
| import signal | ||||
| import sys | ||||
| import time | ||||
| import zmq | ||||
|  | ||||
| from pcap_utils import * | ||||
|  | ||||
| import elasticsearch | ||||
| import elasticsearch_dsl | ||||
|  | ||||
| ################################################################################################### | ||||
| MINIMUM_CHECKED_FILE_SIZE_DEFAULT = 24 | ||||
| MAXIMUM_CHECKED_FILE_SIZE_DEFAULT = 32*1024*1024*1024 | ||||
|  | ||||
| ################################################################################################### | ||||
| # for querying Arkime's "files" Elasticsearch index to avoid re-processing (and duplicating sessions for) | ||||
| # files that have already been processed | ||||
| ARKIME_FILES_INDEX = "files" | ||||
| ARKIME_FILE_TYPE = "file" | ||||
| ARKIME_FILE_SIZE_FIELD = "filesize" | ||||
|  | ||||
| ################################################################################################### | ||||
| debug = False | ||||
| verboseDebug = False | ||||
| debugToggled = False | ||||
| pdbFlagged = False | ||||
| args = None | ||||
| scriptName = os.path.basename(__file__) | ||||
| scriptPath = os.path.dirname(os.path.realpath(__file__)) | ||||
| origPath = os.getcwd() | ||||
| shuttingDown = False | ||||
|  | ||||
| ################################################################################################### | ||||
| # watch files written to and moved to this directory | ||||
| class EventWatcher(pyinotify.ProcessEvent): | ||||
|  | ||||
|   # notify on files written in-place then closed (IN_CLOSE_WRITE), and moved into this directory (IN_MOVED_TO) | ||||
|   _methods = ["IN_CLOSE_WRITE", "IN_MOVED_TO"] | ||||
|  | ||||
|   def __init__(self): | ||||
|     global args | ||||
|     global debug | ||||
|     global verboseDebug | ||||
|  | ||||
|     super().__init__() | ||||
|  | ||||
|     self.useElastic = False | ||||
|  | ||||
|     # if we're going to be querying Elasticsearch for past PCAP file status, connect now | ||||
|     if args.elasticHost is not None: | ||||
|  | ||||
|       connected = False | ||||
|       healthy = False | ||||
|  | ||||
|       # create the connection to Elasticsearch | ||||
|       while (not connected) and (not shuttingDown): | ||||
|         try: | ||||
|           if debug: eprint(f"{scriptName}:\tconnecting to Elasticsearch {args.elasticHost}...") | ||||
|           elasticsearch_dsl.connections.create_connection(hosts=[args.elasticHost]) | ||||
|           if verboseDebug: eprint(f"{scriptName}:\t{elasticsearch_dsl.connections.get_connection().cluster.health()}") | ||||
|           connected = elasticsearch_dsl.connections.get_connection() is not None | ||||
|  | ||||
|         except elasticsearch.exceptions.ConnectionError as connError: | ||||
|           if debug: eprint(f"{scriptName}:\tElasticsearch connection error: {connError}") | ||||
|  | ||||
|         if (not connected) and args.elasticWaitForHealth: | ||||
|           time.sleep(1) | ||||
|         else: | ||||
|           break | ||||
|  | ||||
|       # if requested, wait for at least "yellow" health in the cluster for the "files" index | ||||
|       while connected and args.elasticWaitForHealth and (not healthy) and (not shuttingDown): | ||||
|         try: | ||||
|           if debug: eprint(f"{scriptName}:\twaiting for Elasticsearch to be healthy") | ||||
|           elasticsearch_dsl.connections.get_connection().cluster.health(index=ARKIME_FILES_INDEX, wait_for_status='yellow') | ||||
|           if verboseDebug: eprint(f"{scriptName}:\t{elasticsearch_dsl.connections.get_connection().cluster.health()}") | ||||
|           healthy = True | ||||
|  | ||||
|         except elasticsearch.exceptions.ConnectionTimeout as connError: | ||||
|           if verboseDebug: eprint(f"{scriptName}:\tElasticsearch health check: {connError}") | ||||
|  | ||||
|         if (not healthy): | ||||
|           time.sleep(1) | ||||
|  | ||||
|       self.useElastic = connected and healthy | ||||
|  | ||||
|     # initialize ZeroMQ context and socket(s) to publish messages to | ||||
|     self.context = zmq.Context() | ||||
|  | ||||
|     # Socket to send messages on | ||||
|     if debug: eprint(f"{scriptName}:\tbinding publisher port {PCAP_TOPIC_PORT}") | ||||
|     self.topic_socket = self.context.socket(zmq.PUB) | ||||
|     self.topic_socket.bind(f"tcp://*:{PCAP_TOPIC_PORT}") | ||||
|  | ||||
|     # todo: do I want to set this? probably not since this guy's whole job is to send | ||||
|     # and if he can't then what's the point? just block | ||||
|     # self.topic_socket.SNDTIMEO = 5000 | ||||
|  | ||||
|     if debug: eprint(f"{scriptName}:\tEventWatcher initialized") | ||||
|  | ||||
| ################################################################################################### | ||||
| # generate the EventWatcher process_* methods that publish notified files to the ZMQ topic | ||||
| def event_process_generator(cls, method): | ||||
|  | ||||
|   # actual method called when we are notified of a file | ||||
|   def _method_name(self, event): | ||||
|  | ||||
|     global args | ||||
|     global debug | ||||
|     global verboseDebug | ||||
|  | ||||
|     if debug: eprint(f"{scriptName}:\t👓\t{event.pathname}") | ||||
|  | ||||
|     # the entity must be a regular PCAP file and actually exist | ||||
|     if (not event.dir) and os.path.isfile(event.pathname): | ||||
|  | ||||
|       # get the file magic description and mime type | ||||
|       fileMime = magic.from_file(event.pathname, mime=True) | ||||
|       fileType = magic.from_file(event.pathname) | ||||
|  | ||||
|       # get the file size, in bytes, to compare against sane values | ||||
|       fileSize = os.path.getsize(event.pathname) | ||||
|       if (args.minBytes <= fileSize <= args.maxBytes) and ((fileMime in PCAP_MIME_TYPES) or ('pcap-ng' in fileType)): | ||||
|  | ||||
|         relativePath = remove_prefix(event.pathname, os.path.join(args.baseDir, '')) | ||||
|  | ||||
|         # check with Arkime's files index in Elasticsearch and make sure it's not a duplicate | ||||
|         fileIsDuplicate = False | ||||
|         if self.useElastic: | ||||
|           s = elasticsearch_dsl.Search(index=ARKIME_FILES_INDEX) \ | ||||
|               .filter("term", _type=ARKIME_FILE_TYPE) \ | ||||
|               .filter("term", node=args.molochNode) \ | ||||
|               .query("wildcard", name=f"*{os.path.sep}{relativePath}") | ||||
|           response = s.execute() | ||||
|           for hit in response: | ||||
|             fileInfo = hit.to_dict() | ||||
|             if (ARKIME_FILE_SIZE_FIELD in fileInfo) and (fileInfo[ARKIME_FILE_SIZE_FIELD] == fileSize): | ||||
|               fileIsDuplicate = True | ||||
|               break | ||||
|  | ||||
|         if fileIsDuplicate: | ||||
|           # this is a duplicate file (it's been processed before) so ignore it | ||||
|           if debug: eprint(f"{scriptName}:\t📋\t{event.pathname}") | ||||
|  | ||||
|         else: | ||||
|           # the entity is a right-sized non-duplicate file, and it exists, so send it to get processed | ||||
|           if debug: eprint(f"{scriptName}:\t📩\t{event.pathname}") | ||||
|           try: | ||||
|             fileInfo = {FILE_INFO_DICT_NAME: event.pathname if args.includeAbsolutePath else relativePath, \ | ||||
|                         FILE_INFO_DICT_SIZE: fileSize, \ | ||||
|                         FILE_INFO_FILE_MIME: fileMime, \ | ||||
|                         FILE_INFO_FILE_TYPE: fileType, \ | ||||
|                         FILE_INFO_DICT_TAGS: tags_from_filename(relativePath)} | ||||
|             self.topic_socket.send_string(json.dumps(fileInfo)) | ||||
|             if debug: eprint(f"{scriptName}:\t📫\t{fileInfo}") | ||||
|           except zmq.Again as timeout: | ||||
|             if verboseDebug: eprint(f"{scriptName}:\t🕑\t{event.pathname}") | ||||
|  | ||||
|       else: | ||||
|         # too small/big to care about, or the wrong type, ignore it | ||||
|         if debug: eprint(f"{scriptName}:\t✋\t{event.pathname}") | ||||
|  | ||||
|   # assign process method to class | ||||
|   _method_name.__name__ = "process_{}".format(method) | ||||
|   setattr(cls, _method_name.__name__, _method_name) | ||||
|  | ||||
| ################################################################################################### | ||||
| # handle sigint/sigterm and set a global shutdown variable | ||||
| def shutdown_handler(signum, frame): | ||||
|   global shuttingDown | ||||
|   shuttingDown = True | ||||
|  | ||||
| ################################################################################################### | ||||
| # handle sigusr1 for a pdb breakpoint | ||||
| def pdb_handler(sig, frame): | ||||
|   global pdbFlagged | ||||
|   pdbFlagged = True | ||||
|  | ||||
| ################################################################################################### | ||||
| # handle sigusr2 for toggling debug | ||||
| def debug_toggle_handler(signum, frame): | ||||
|   global debug | ||||
|   global debugToggled | ||||
|   debug = not debug | ||||
|   debugToggled = True | ||||
|  | ||||
| ################################################################################################### | ||||
| # main | ||||
| def main(): | ||||
|   global args | ||||
|   global debug | ||||
|   global verboseDebug | ||||
|   global debugToggled | ||||
|   global pdbFlagged | ||||
|   global shuttingDown | ||||
|  | ||||
|   parser = argparse.ArgumentParser(description=scriptName, add_help=False, usage='{} <arguments>'.format(scriptName)) | ||||
|   parser.add_argument('-v', '--verbose', dest='debug', help="Verbose output", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   parser.add_argument('--extra-verbose', dest='verboseDebug', help="Super verbose output", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|  | ||||
|   parser.add_argument('--min-bytes', dest='minBytes', help="Minimum size for checked files", metavar='<bytes>', type=int, default=MINIMUM_CHECKED_FILE_SIZE_DEFAULT, required=False) | ||||
|   parser.add_argument('--max-bytes', dest='maxBytes', help="Maximum size for checked files", metavar='<bytes>', type=int, default=MAXIMUM_CHECKED_FILE_SIZE_DEFAULT, required=False) | ||||
|   parser.add_argument('--elasticsearch', required=False, dest='elasticHost', metavar='<STR>', type=str, default=None, help='Elasticsearch connection string for querying Arkime files index to ignore duplicates') | ||||
|   parser.add_argument('--elasticsearch-wait', dest='elasticWaitForHealth', help="Wait for Elasticsearch to be healthy before starting", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   parser.add_argument('--moloch-node', required=False, dest='molochNode', metavar='<STR>', type=str, default='arkime', help='Arkime node value for querying Arkime files index to ignore duplicates') | ||||
|  | ||||
|   parser.add_argument('--ignore-existing', dest='ignoreExisting', help="Ignore preexisting files in the monitor directory", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   parser.add_argument('--absolute-path', dest='includeAbsolutePath', help="Publish absolute path for message (vs. path relative to monitored directory)", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   parser.add_argument('--start-sleep', dest='startSleepSec', help="Sleep for this many seconds before starting", metavar='<seconds>', type=int, default=0, required=False) | ||||
|   parser.add_argument('-r', '--recursive-directory', dest='recursiveDir', help="If specified, monitor all directories with this name underneath --directory", metavar='<name>', type=str, required=False) | ||||
|   requiredNamed = parser.add_argument_group('required arguments') | ||||
|   requiredNamed.add_argument('-d', '--directory', dest='baseDir', help='Directory to monitor', metavar='<directory>', type=str, required=True) | ||||
|  | ||||
|   try: | ||||
|     parser.error = parser.exit | ||||
|     args = parser.parse_args() | ||||
|   except SystemExit: | ||||
|     parser.print_help() | ||||
|     exit(2) | ||||
|  | ||||
|   verboseDebug = args.verboseDebug | ||||
|   debug = args.debug or verboseDebug | ||||
|   if debug: | ||||
|     eprint(os.path.join(scriptPath, scriptName)) | ||||
|     eprint("{} arguments: {}".format(scriptName, sys.argv[1:])) | ||||
|     eprint("{} arguments: {}".format(scriptName, args)) | ||||
|   else: | ||||
|     sys.tracebacklimit = 0 | ||||
|  | ||||
|   logging.basicConfig(level=logging.ERROR) | ||||
|  | ||||
|   # handle sigint and sigterm for graceful shutdown | ||||
|   signal.signal(signal.SIGINT, shutdown_handler) | ||||
|   signal.signal(signal.SIGTERM, shutdown_handler) | ||||
|   signal.signal(signal.SIGUSR1, pdb_handler) | ||||
|   signal.signal(signal.SIGUSR2, debug_toggle_handler) | ||||
|  | ||||
|   # sleep for a bit if requested | ||||
|   sleepCount = 0 | ||||
|   while (not shuttingDown) and (sleepCount < args.startSleepSec): | ||||
|     time.sleep(1) | ||||
|     sleepCount += 1 | ||||
|  | ||||
|   # add events to watch to EventWatcher class | ||||
|   for method in EventWatcher._methods: | ||||
|     event_process_generator(EventWatcher, method) | ||||
|  | ||||
|   # if directory to monitor doesn't exist, create it now | ||||
|   if os.path.isdir(args.baseDir): | ||||
|     preexistingDir = True | ||||
|   else: | ||||
|     preexistingDir = False | ||||
|     if debug: eprint(f'{scriptName}: creating "{args.baseDir}" to monitor') | ||||
|     pathlib.Path(args.baseDir).mkdir(parents=False, exist_ok=True) | ||||
|  | ||||
|   # if recursion was requested, get the list of directories to monitor (poll until at least one exists) | ||||
|   watchDirs = [] | ||||
|   while (len(watchDirs) == 0) and (not shuttingDown): | ||||
|     if args.recursiveDir is None: | ||||
|       watchDirs = [args.baseDir] | ||||
|     else: | ||||
|       watchDirs = glob.glob(f'{args.baseDir}/**/{args.recursiveDir}', recursive=True) | ||||
|       if (len(watchDirs) == 0): | ||||
|         # sleep briefly rather than busy-waiting for the watched directories to appear | ||||
|         time.sleep(1) | ||||
|  | ||||
|   # begin threaded watch of path(s) | ||||
|   time.sleep(1) | ||||
|  | ||||
|   event_notifier_started = False | ||||
|   watch_manager = pyinotify.WatchManager() | ||||
|   event_notifier = pyinotify.ThreadedNotifier(watch_manager, EventWatcher()) | ||||
|   for watchDir in watchDirs: | ||||
|     watch_manager.add_watch(os.path.abspath(watchDir), pyinotify.ALL_EVENTS) | ||||
|   if debug: eprint(f"{scriptName}: monitoring {watchDirs}") | ||||
|   time.sleep(2) | ||||
|   if (not shuttingDown): | ||||
|     event_notifier.start() | ||||
|     event_notifier_started = True | ||||
|  | ||||
|   # if there are any preexisting files (and ignoreExisting is not set), "touch" them so that they trigger a notification | ||||
|   if preexistingDir and (not args.ignoreExisting) and (not shuttingDown): | ||||
|     filesTouched = 0 | ||||
|     for watchDir in watchDirs: | ||||
|       for preexistingFile in [str(x) for x in pathlib.Path(watchDir).iterdir() if x.is_file()]:  # iterdir() already includes the watchDir prefix | ||||
|         touch(preexistingFile) | ||||
|         filesTouched += 1 | ||||
|     if debug and (filesTouched > 0): | ||||
|       eprint(f"{scriptName}: found {filesTouched} preexisting files to check") | ||||
|  | ||||
|   # loop forever, or until we're told to shut down, whichever comes first | ||||
|   while (not shuttingDown): | ||||
|     if pdbFlagged: | ||||
|       pdbFlagged = False | ||||
|       breakpoint() | ||||
|     time.sleep(0.2) | ||||
|  | ||||
|   # graceful shutdown | ||||
|   if debug: eprint(f"{scriptName}: shutting down...") | ||||
|   if event_notifier_started: | ||||
|     event_notifier.stop() | ||||
|   time.sleep(1) | ||||
|  | ||||
|   if debug: eprint(f"{scriptName}: finished monitoring {watchDirs}") | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|   main() | ||||
							
								
								
									
167 Vagrant/resources/malcolm/shared/bin/preseed_late_user_config.sh (Executable file)
| @@ -0,0 +1,167 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| ################################################################################## | ||||
| # prompt whether to autologin or not | ||||
| # prompt whether or not to lock xscreensaver for the GUI session | ||||
| # prompt whether to use U.S. DoD login banner (https://www.stigviewer.com/stig/general_purpose_operating_system_srg/2015-06-26/finding/V-56585) | ||||
| # prompt whether or not to disable IPv6 | ||||
|  | ||||
| # this is a debconf-compatible script | ||||
| . /usr/share/debconf/confmodule | ||||
|  | ||||
| # template for user prompt | ||||
| cat > /tmp/malcolm.template <<'!EOF!' | ||||
| Template: malcolm/autologin | ||||
| Type: boolean | ||||
| Default: true | ||||
| Description: | ||||
|  Automatically login to the GUI session? | ||||
|  | ||||
| Template: malcolm/autologin_title | ||||
| Type: text | ||||
| Description: Autologin? | ||||
|  | ||||
| Template: malcolm/xscreensaver_lock | ||||
| Type: boolean | ||||
| Default: false | ||||
| Description: | ||||
|  Should the GUI session be locked due to inactivity? | ||||
|  | ||||
| Template: malcolm/xscreensaver_title | ||||
| Type: text | ||||
| Description: Lock idle session? | ||||
|  | ||||
| Template: malcolm/dod_banner | ||||
| Type: boolean | ||||
| Default: false | ||||
| Description: | ||||
|  Display the Standard Mandatory DoD Notice and Consent Banner? | ||||
|  | ||||
| Template: malcolm/dod_banner_title | ||||
| Type: text | ||||
| Description: Use U.S. DoD login banner? | ||||
|  | ||||
| Template: malcolm/disable_ipv6 | ||||
| Type: boolean | ||||
| Default: true | ||||
| Description: | ||||
|  Disable IPv6? | ||||
|  | ||||
| Template: malcolm/disable_ipv6_title | ||||
| Type: text | ||||
| Description: IPv6 | ||||
| !EOF! | ||||
|  | ||||
| # load template | ||||
| db_x_loadtemplatefile /tmp/malcolm.template malcolm | ||||
|  | ||||
| # set title | ||||
| db_settitle malcolm/disable_ipv6_title | ||||
|  | ||||
| # prompt | ||||
| db_input critical malcolm/disable_ipv6 | ||||
| db_go | ||||
|  | ||||
| # get answer into $RET | ||||
| db_get malcolm/disable_ipv6 | ||||
|  | ||||
| # store answer in /etc/sysctl.conf and /etc/default/grub | ||||
| if [ "$RET" = false ]; then | ||||
|   DISABLE_IPV6_VAL=0 | ||||
| else | ||||
|   DISABLE_IPV6_VAL=1 | ||||
| fi | ||||
|  | ||||
| sed -i "s/\(disable_ipv6=\)[[:digit:]]\+/\1$DISABLE_IPV6_VAL/g" /etc/sysctl.conf 2>/dev/null || true | ||||
| sed -i "s/\(ipv6\.disable=\)[[:digit:]]\+/\1$DISABLE_IPV6_VAL/g" /etc/default/grub 2>/dev/null || true | ||||
|  | ||||
| echo "malcolm/disable_ipv6=$RET" > /tmp/malcolm.answer | ||||
|  | ||||
| # set title | ||||
| db_settitle malcolm/autologin_title | ||||
|  | ||||
| # prompt | ||||
| db_input critical malcolm/autologin | ||||
| db_go | ||||
|  | ||||
| # get answer into $RET | ||||
| db_get malcolm/autologin | ||||
|  | ||||
| # store answer in /etc/lightdm/lightdm.conf for autologin | ||||
| if [ -n $RET ] && [ -f /etc/lightdm/lightdm.conf ]; then | ||||
|   MAIN_USER="$(id -nu 1000)" | ||||
|   if [ -n $MAIN_USER ] && [ "$RET" = true ]; then | ||||
|     sed -i "s/^#\(autologin-user=\).*/\1$MAIN_USER/" /etc/lightdm/lightdm.conf | ||||
|     sed -i 's/^#\(autologin-user-timeout=\)/\1/' /etc/lightdm/lightdm.conf | ||||
|   else | ||||
|   	sed -i 's/^\(autologin-user=\)/#\1/' /etc/lightdm/lightdm.conf | ||||
|   	sed -i 's/^\(autologin-user-timeout=\)/#\1/' /etc/lightdm/lightdm.conf | ||||
|   fi | ||||
| fi | ||||
|  | ||||
| echo "malcolm/autologin=$RET" > /tmp/malcolm.answer | ||||
|  | ||||
| # set title | ||||
| db_settitle malcolm/xscreensaver_title | ||||
|  | ||||
| # prompt | ||||
| db_input critical malcolm/xscreensaver_lock | ||||
| db_go | ||||
|  | ||||
| # get answer into $RET | ||||
| db_get malcolm/xscreensaver_lock | ||||
|  | ||||
| # update the places where defaults for the xscreensaver lock setting can exist | ||||
| if [ -n "$RET" ]; then | ||||
|   URET="$(echo "$RET" | sed -r 's/\<./\U&/')" | ||||
|   sed -i "s/^\(xscreensaver.lock:\).*$/\1 $RET/g" /etc/skel/.Xresources 2>/dev/null || true | ||||
|   sed -i "s/^\(lock:\).*$/\1		$URET/g" /etc/skel/.xscreensaver 2>/dev/null  || true | ||||
|   sed -i "s/^\(\*lock:\).*$/\1			$URET/g" /etc/X11/app-defaults/XScreenSaver* 2>/dev/null || true | ||||
|   # at this point users have already been created, so we need to re-apply our changes there | ||||
|   for HOMEDIR in $(getent passwd | cut -d: -f6); do | ||||
|     [ -f /etc/skel/.Xresources ] && [ -f "$HOMEDIR"/.Xresources ] && cp -f /etc/skel/.Xresources "$HOMEDIR"/.Xresources | ||||
|     [ -f /etc/skel/.xscreensaver ] && [ -f "$HOMEDIR"/.xscreensaver ] && cp -f /etc/skel/.xscreensaver "$HOMEDIR"/.xscreensaver | ||||
|   done | ||||
| fi | ||||
|  | ||||
| echo "malcolm/xscreensaver_lock=$RET" >> /tmp/malcolm.answer | ||||
|  | ||||
| # set title | ||||
| db_settitle malcolm/dod_banner_title | ||||
|  | ||||
| # prompt | ||||
| db_input critical malcolm/dod_banner | ||||
| db_go | ||||
|  | ||||
| # get answer into $RET | ||||
| db_get malcolm/dod_banner | ||||
|  | ||||
| if [ "$RET" = true ]; then | ||||
|   # login banner | ||||
|   OLD_ISSUE="$(grep ^Debian /etc/issue | sed -r "s@[[:space:]]\\\.*@@g")" | ||||
|   cat << 'EOF' > /etc/issue | ||||
| You are accessing a U.S. Government (USG) Information System (IS) that is provided for USG-authorized use only. | ||||
| By using this IS (which includes any device attached to this IS), you consent to the following conditions: | ||||
| -The USG routinely intercepts and monitors communications on this IS for purposes including, but not limited to, penetration testing, COMSEC monitoring, network operations and defense, personnel misconduct (PM), law enforcement (LE), and counterintelligence (CI) investigations. | ||||
| -At any time, the USG may inspect and seize data stored on this IS. | ||||
| -Communications using, or data stored on, this IS are not private, are subject to routine monitoring, interception, and search, and may be disclosed or used for any USG-authorized purpose. | ||||
| -This IS includes security measures (e.g., authentication and access controls) to protect USG interests--not for your personal benefit or privacy. | ||||
| -Notwithstanding the above, using this IS does not constitute consent to PM, LE or CI investigative searching or monitoring of the content of privileged communications, or work product, related to personal representation or services by attorneys, psychotherapists, or clergy, and their assistants. Such communications and work product are private and confidential. See User Agreement for details. | ||||
|  | ||||
| EOF | ||||
|   /bin/echo -E "$OLD_ISSUE \n \l" >> /etc/issue | ||||
|   echo >> /etc/issue | ||||
|   if [ -f /usr/local/bin/dod-login-banner.sh ]; then | ||||
|     [ -f /etc/xdg/lxsession/LXDE/autostart ] && echo "@/usr/local/bin/dod-login-banner.sh" >> /etc/xdg/lxsession/LXDE/autostart | ||||
|     for HOMEDIR in $(getent passwd | cut -d: -f6); do | ||||
|       [ -f "$HOMEDIR"/.config/lxsession/LXDE/autostart ] && echo "@/usr/local/bin/dod-login-banner.sh" >> "$HOMEDIR"/.config/lxsession/LXDE/autostart | ||||
|     done | ||||
|   fi | ||||
|  | ||||
| else | ||||
|   rm -f /usr/local/bin/dod-login-banner.sh | ||||
| fi | ||||
|  | ||||
| echo "malcolm/dod_banner=$RET" >> /tmp/malcolm.answer | ||||
							
								
								
									
3 Vagrant/resources/malcolm/shared/bin/preseed_partman_determine_disk.sh (Executable file)
| @@ -0,0 +1,3 @@ | ||||
| #!/bin/sh | ||||
|  | ||||
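| # print the device name of the smallest non-USB internal disk ([hs]d* or nvme*): parted_devices lists | ||||
| # candidate disks, USB-attached ones are filtered out, and the result is sorted by the size column ascending | ||||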
| parted_devices | egrep "^($(find /sys/block -mindepth 1 -maxdepth 1 -type l \( -name '[hs]d*' -o -name 'nvme*' \) -exec ls -l '{}' ';' | grep -v "usb" | sed 's@^.*\([hs]d[a-z]\+\|nvme[0-9]\+\).*$@/dev/\1@' | sed -e :a -e '$!N; s/\n/|/; ta'))" | sort -k2n | head -1 | cut -f1 | ||||
							
								
								
									
99 Vagrant/resources/malcolm/shared/bin/prune_files.sh (Executable file)
| @@ -0,0 +1,99 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| # recursion depth (1 = not recursive) | ||||
| DEPTH=1 | ||||
|  | ||||
| # threshold is an integer percentage between 1 and 100; the script will prune until disk usage drops below it | ||||
| THRESHOLD=90 # defaults to "prune when usage exceeds 90%" | ||||
|  | ||||
| # if specified, this script will check and prune every $INTERVAL seconds | ||||
| INTERVAL=0 # defaults to "run once then exit" | ||||
|  | ||||
| VERBOSE=0 # defaults to "not verbose" | ||||
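|  | ||||
| # illustrative invocation (hypothetical path): prune_files.sh -p /capture/pcap -t 90 -i 300 -r -v | ||||
| # (recursively check /capture/pcap every 300 seconds, pruning oldest files first while usage exceeds the threshold) | ||||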
|  | ||||
| while getopts t:p:i:rv opts; do | ||||
|    case ${opts} in | ||||
|       p) PRUNE_PATH=${OPTARG} ;; | ||||
|       t) THRESHOLD=${OPTARG} ;; | ||||
|       i) INTERVAL=${OPTARG} ;; | ||||
|       r) DEPTH=999 ;; | ||||
|       v) VERBOSE=1 ;; | ||||
|    esac | ||||
| done | ||||
|  | ||||
| INT_RE='^[0-9]+$' | ||||
|  | ||||
| if [ -z "$PRUNE_PATH" ] || [ ! -e "$PRUNE_PATH" ] || ! pushd "$PRUNE_PATH" >/dev/null 2>&1 ; then | ||||
|   echo "Please specify prune path with -p" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [ -z "$THRESHOLD" ] || [[ ! "$THRESHOLD" =~ $INT_RE ]] || ! [ "$THRESHOLD" -ge 1 -a "$THRESHOLD" -le 100 ] ; then | ||||
|   echo "Please specify prune threshold (percentage, 1-100) with -t" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| if [[ ! "$INTERVAL" =~ $INT_RE ]] || ! [ "$INTERVAL" -ge 0 -a "$INTERVAL" -le 86400 ] ; then | ||||
|   echo "Please specify prune check interval (seconds, 0-86400) with -i (0 = run once)" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| while true ; do | ||||
|  | ||||
|   # check initial disk capacity | ||||
|   USAGE=$(df -k . | awk '{gsub("%",""); capacity=$5}; END {print capacity}') | ||||
|   if [ $USAGE -gt $THRESHOLD ] ; then | ||||
|  | ||||
|     # we have exceeded the threshold, see if there is something to prune | ||||
|     [[ "$VERBOSE" == "1" ]] && echo "\"$PRUNE_PATH\" is at $USAGE% of capacity, pruning..." | ||||
|  | ||||
|     # read files by modification time, oldest first, deleting until we've dropped below the threshold | ||||
|     DELETED=0 | ||||
|     while IFS='' read -r -d ' ' FILE_TIME && IFS='' read -r -d ' ' FILE_SIZE && IFS='' read -r -d '' FILE_TO_DELETE; do | ||||
|  | ||||
|       FILE_SIZE_HUMAN=$(numfmt --to=iec-i --suffix=B $FILE_SIZE) | ||||
|       FILE_TIME_HUMAN=$(date -u -d @$FILE_TIME) | ||||
|  | ||||
|       if [ -f "$FILE_TO_DELETE" ]; then | ||||
|         if rm -f "$FILE_TO_DELETE" ; then | ||||
|           DELETED=$((DELETED+1)) | ||||
|  | ||||
|           echo "Pruned \"$FILE_TO_DELETE\" ($FILE_SIZE_HUMAN, $FILE_TIME_HUMAN)" | ||||
|  | ||||
|           # re-check disk capacity | ||||
|           USAGE=$(df -k . | awk '{gsub("%",""); capacity=$5}; END {print capacity}') | ||||
|           if [ $USAGE -gt $THRESHOLD ] ; then | ||||
|             # we still exceed the threshold, continue to loop | ||||
|             [[ "$VERBOSE" == "1" ]] && echo "\"$PRUNE_PATH\" is at $USAGE% of capacity, pruning..." | ||||
|           else | ||||
|             # we're below the limit, break | ||||
|             [[ "$VERBOSE" == "1" ]] && echo "\"$PRUNE_PATH\" is at $USAGE% of capacity" | ||||
|             break | ||||
|           fi | ||||
|  | ||||
|         fi # file was rm'ed | ||||
|       fi # file exists | ||||
|  | ||||
|     done < <(find . -xdev -mindepth 1 -maxdepth $DEPTH -ignore_readdir_race -type f \( ! -path '*/spool/*' -o -path '*/spool/tmp*' \) -printf '%T@ %s %p\0' 2>/dev/null | sort -zn 2>/dev/null) | ||||
|  | ||||
|     if [ $DELETED -gt 0 ] ; then | ||||
|       [[ "$VERBOSE" == "1" ]] && echo "Pruned $DELETED files in \"$PRUNE_PATH\"" | ||||
|     else | ||||
|       echo "Nothing was pruned in \"$PRUNE_PATH\"!" | ||||
|     fi | ||||
|  | ||||
|   else | ||||
|     [[ "$VERBOSE" == "1" ]] && echo "\"$PRUNE_PATH\" is at $USAGE% of capacity" | ||||
|   fi | ||||
|  | ||||
|   if [ $INTERVAL -gt 0 ] ; then | ||||
|     sleep $INTERVAL | ||||
|   else | ||||
|     break | ||||
|   fi | ||||
|  | ||||
| done | ||||
|  | ||||
| popd >/dev/null 2>&1 | ||||
							
								
								
									
486 Vagrant/resources/malcolm/shared/bin/sensor-capture-disk-config.py (Executable file)
| @@ -0,0 +1,486 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| ################################################################################################### | ||||
| # Detect, partition, and format devices to be used for sensor packet/log captures. | ||||
| # | ||||
| # Run the script with --help for options | ||||
| ################################################################################################### | ||||
|  | ||||
| import os | ||||
| import re | ||||
| import glob | ||||
| import sys | ||||
| import uuid | ||||
| import argparse | ||||
| import fileinput | ||||
| from collections import defaultdict | ||||
| from sensorcommon import * | ||||
| from fstab import Fstab | ||||
|  | ||||
| MINIMUM_CAPTURE_DEVICE_BYTES = 100*1024*1024*1024 # 100GiB | ||||
| CAPTURE_MOUNT_ROOT_PATH = "/capture" | ||||
| CAPTURE_MOUNT_PCAP_DIR = "pcap" | ||||
| CAPTURE_MOUNT_ZEEK_DIR = "bro" | ||||
| FSTAB_FILE = "/etc/fstab" | ||||
| CRYPTTAB_FILE = "/etc/crypttab" | ||||
| CAPTURE_GROUP_OWNER = "netdev" | ||||
| CAPTURE_USER_UID = 1000 | ||||
| CAPTURE_DIR_PERMS = 0o750 | ||||
| CAPTURE_SUBDIR_PERMS = 0o770 | ||||
| SENSOR_CAPTURE_CONFIG = '/opt/sensor/sensor_ctl/control_vars.conf' | ||||
| CAPTURE_CRYPT_KEYFILE = '/etc/capture_crypt.key' | ||||
| CAPTURE_CRYPT_KEYFILE_PERMS = 0o600 | ||||
| CAPTURE_CRYPT_DEV_PREFIX = 'capture_vault_' | ||||
|  | ||||
| debug = False | ||||
|  | ||||
| ################################################################################################### | ||||
| # used to map output of lsblk | ||||
| class PartitionInfo: | ||||
|   __slots__ = ('device', 'partition', 'mapper', 'uuid', 'mount') | ||||
|   def __init__(self, device=None, partition=None, mapper=None, uuid=None, mount=None): | ||||
|     self.device = device | ||||
|     self.partition = partition | ||||
|     self.mapper = mapper | ||||
|     self.uuid = uuid | ||||
|     self.mount = mount | ||||
|  | ||||
| ################################################################################################### | ||||
| # get interactive user response to Y/N question | ||||
| def YesOrNo(question): | ||||
|   reply = str(input(question+' (y/n): ')).lower().strip() | ||||
|   if reply.startswith('y'): | ||||
|     return True | ||||
|   elif reply.startswith('n'): | ||||
|     return False | ||||
|   else: | ||||
|     # empty or unrecognized response: ask again (indexing reply[0] would raise IndexError on empty input) | ||||
|     return YesOrNo(question) | ||||
|  | ||||
| ################################################################################################### | ||||
| # create a name we can use for a mapper device name for encryption | ||||
| def CreateMapperName(device): | ||||
|   return f"{CAPTURE_CRYPT_DEV_PREFIX}{''.join([c if c.isalnum() else '_' for c in remove_prefix(device, '/dev/')])}" | ||||
|  | ||||
| def CreateMapperDeviceName(device): | ||||
|   return f"/dev/mapper/{CreateMapperName(device)}" | ||||
|  | ||||
| ################################################################################################### | ||||
| # determine if a device (e.g., sda) is an internal (True) or removable (False) device | ||||
| def IsInternalDevice(name): | ||||
|   rootdir_pattern = re.compile(r'^.*?/devices') | ||||
|  | ||||
|   removableFlagFile = '/sys/block/%s/device/block/%s/removable' % (name, name) | ||||
|   if not os.path.isfile(removableFlagFile): | ||||
|     removableFlagFile = '/sys/block/%s/removable' % (name) | ||||
|   if os.path.isfile(removableFlagFile): | ||||
|     with open(removableFlagFile) as f: | ||||
|       if f.read(1) == '1': | ||||
|         return False | ||||
|  | ||||
|   path = rootdir_pattern.sub('', os.readlink('/sys/block/%s' % name)) | ||||
|   hotplug_buses = ("usb", "ieee1394", "mmc", "pcmcia", "firewire") | ||||
|   for bus in hotplug_buses: | ||||
|     if os.path.exists('/sys/bus/%s' % bus): | ||||
|       for device_bus in os.listdir('/sys/bus/%s/devices' % bus): | ||||
|         device_link = rootdir_pattern.sub('', os.readlink('/sys/bus/%s/devices/%s' % (bus, device_bus))) | ||||
|         if re.search(device_link, path): | ||||
|           return False | ||||
|  | ||||
|   return True | ||||
|  | ||||
| ################################################################################################### | ||||
| # return a list of internal storage devices (e.g., [sda, sdb]) | ||||
| def GetInternalDevices(): | ||||
|   devs = [] | ||||
|   for path in glob.glob('/sys/block/*/device'): | ||||
|     name = re.sub('.*/(.*?)/device', r'\g<1>', path) | ||||
|     if IsInternalDevice(name): | ||||
|       devs.append(name) | ||||
|   return devs | ||||
|  | ||||
| ################################################################################################### | ||||
| # given a device (any file descriptor, actually) return size in bytes by seeking to the end | ||||
| def GetDeviceSize(device): | ||||
|   fd = os.open(device, os.O_RDONLY) | ||||
|   try: | ||||
|     return os.lseek(fd, 0, os.SEEK_END) | ||||
|   finally: | ||||
|     os.close(fd) | ||||
|  | ||||
| ################################################################################################### | ||||
| # main | ||||
| ################################################################################################### | ||||
| def main(): | ||||
|  | ||||
|   # to parse fdisk output, look for partitions after partitions header line | ||||
|   fdisk_pars_begin_pattern = re.compile(r'^Device\s+Start\s+End\s+Sectors\s+Size\s+Type\s*$') | ||||
|   # to parse partitions from fdisk output after parted creates partition table | ||||
|   fdisk_par_pattern = re.compile(r'^(?P<device>\S+)\s+(?P<start>\d+)\s+(?P<end>\d+)\s+(?P<sectors>\d+)\s+(?P<size>\S+)\s+(?P<type>.*)$') | ||||
|  | ||||
|   # extract arguments from the command line | ||||
|   parser = argparse.ArgumentParser(description='sensor-capture-disk-config.py', add_help=False, usage='sensor-capture-disk-config.py [options]') | ||||
|   parser.add_argument('-i', '--interactive', dest='interactive', type=str2bool, nargs='?', const=True, default=False, help="Interactive") | ||||
|   parser.add_argument('-u', '--umount', dest='umount', type=str2bool, nargs='?', const=True, default=False, help="Unmount capture directories before determining candidate drives") | ||||
|   parser.add_argument('-v', '--verbose', dest='debug', type=str2bool, nargs='?', const=True, default=False, help="Verbose output") | ||||
|   parser.add_argument('-n', '--dry-run', dest='dryrun', type=str2bool, nargs='?', const=True, default=False, help="Dry run (don't perform actions)") | ||||
|   parser.add_argument('-c', '--crypto', dest='encrypt', type=str2bool, nargs='?', const=True, default=False, help="Encrypt formatted volumes") | ||||
|   try: | ||||
|     parser.error = parser.exit | ||||
|     args = parser.parse_args() | ||||
|   except SystemExit: | ||||
|     parser.print_help() | ||||
|     exit(2) | ||||
|  | ||||
|   debug = args.debug | ||||
|   if debug: eprint(f"Arguments: {sys.argv[1:]}") | ||||
|   if debug: eprint(f"Arguments: {args}") | ||||
|  | ||||
|   # unmount existing mounts if requested | ||||
|   if args.umount and (not args.dryrun): | ||||
|     if (not args.interactive) or YesOrNo(f'Unmount any mounted capture path(s)?'): | ||||
|       if debug: eprint(f"Attempting unmount of capture path(s)...") | ||||
|       run_process(f"umount {os.path.join(CAPTURE_MOUNT_ROOT_PATH, CAPTURE_MOUNT_PCAP_DIR)}") | ||||
|       run_process(f"umount {os.path.join(CAPTURE_MOUNT_ROOT_PATH, CAPTURE_MOUNT_ZEEK_DIR)}") | ||||
|       run_process(f"umount {CAPTURE_MOUNT_ROOT_PATH}") | ||||
|       # also luksClose any luks volumes devices we might have set up | ||||
|       for cryptDev in [remove_prefix(x, '/dev/mapper/') for x in glob.glob(f"/dev/mapper/{CAPTURE_CRYPT_DEV_PREFIX}*")]: | ||||
|         if debug: eprint(f"Running crypsetup luksClose on {cryptDev}...") | ||||
|         _, cryptOut = run_process(f"/sbin/cryptsetup --verbose luksClose {cryptDev}", stdout=True, stderr=True, timeout=300) | ||||
|         if debug: | ||||
|           for line in cryptOut: | ||||
|             eprint(f"\t{line}") | ||||
|       _, reloadOut = run_process(f"systemctl daemon-reload") | ||||
|  | ||||
|   # check existing mounts, if the capture path(s) are already mounted, then abort | ||||
|   with open('/proc/mounts', 'r') as f: | ||||
|     for line in f.readlines(): | ||||
|       mountDetails = line.split() | ||||
|       if (len(mountDetails) >= 2): | ||||
|         mountPoint = mountDetails[1] | ||||
|         if mountPoint.startswith(CAPTURE_MOUNT_ROOT_PATH): | ||||
|           eprint(f"It appears there is already a device mounted under {CAPTURE_MOUNT_ROOT_PATH} at {mountPoint}.") | ||||
|           eprint(f"If you wish to continue, you may run this script with the '-u|--umount' option to umount first.") | ||||
|           eprint() | ||||
|           parser.print_help() | ||||
|           exit(2) | ||||
|  | ||||
|   # get physical disks, partitions, device maps, and any mountpoints and UUID associated | ||||
|   allDisks = defaultdict(list) | ||||
|   if debug: eprint(f"Block devices:") | ||||
|   for device in GetInternalDevices(): | ||||
|     ecode, deviceTree = run_process(f'/bin/lsblk -o name,uuid,mountpoint --paths --noheadings /dev/{device}', stdout=True, stderr=False) | ||||
|     if (ecode == 0): | ||||
|       currentDev = None | ||||
|       currentPar = None | ||||
|       currentMapper = None | ||||
|       for line in deviceTree: | ||||
|         line = line.rstrip() | ||||
|         if (len(line) > 0): | ||||
|           if debug: eprint(f"\t{line}") | ||||
|           if (line == f"/dev/{device}"): | ||||
|             currentDev = line | ||||
|             currentPar = None | ||||
|             currentMapper = None | ||||
|             allDisks[currentDev].append(PartitionInfo(device=currentDev)) | ||||
|           elif (currentDev is not None) and (line[2:2+len(f"/dev/{device}")] == f"/dev/{device}"): | ||||
|             parInfo = f"/{line.split('/', 1)[-1]}".split() | ||||
|             if (len(parInfo) >= 2): | ||||
|               currentPar = PartitionInfo(device=currentDev, partition=parInfo[0], uuid=parInfo[1], mount=parInfo[2] if (len(parInfo) > 2) else None) | ||||
|               currentMapper = None | ||||
|               allDisks[currentDev].append(currentPar) | ||||
|           elif (currentPar is not None) and ("/dev/mapper/" in line): | ||||
|             parInfo = f"/{line.split('/', 1)[-1]}".split() | ||||
|             if (len(parInfo) >= 2): | ||||
|               currentMapper = PartitionInfo(device=currentDev, partition=currentPar.partition, mapper=parInfo[0], uuid=parInfo[1], mount=parInfo[2] if (len(parInfo) > 2) else None) | ||||
|               allDisks[currentDev].append(currentMapper) | ||||
|  | ||||
|   # at this point allDisks might look like this: | ||||
|   # defaultdict(<class 'list'>, | ||||
|   #             {'/dev/sda': [PartitionInfo(device='/dev/sda', partition=None, mapper=None, uuid=None, mount=None), | ||||
|   #                           PartitionInfo(device='/dev/sda', partition='/dev/sda1', mapper=None, uuid='B42B-7414', mount=None), | ||||
|   #                           PartitionInfo(device='/dev/sda', partition='/dev/sda2', mapper=None, uuid='6DF8-D966', mount='/boot/efi'), | ||||
|   #                           PartitionInfo(device='/dev/sda', partition='/dev/sda3', mapper=None, uuid='f6b575e4-0ec2-47ab-8d0a-9d677ac4fe3c', mount='/boot'), | ||||
|   #                           PartitionInfo(device='/dev/sda', partition='/dev/sda4', mapper=None, uuid='Lmx30A-U9qR-kDZF-WOju-zlOi-otrR-WNjh7j', mount=None), | ||||
|   #                           PartitionInfo(device='/dev/sda', partition='/dev/sda4', mapper='/dev/mapper/main-swap', uuid='00987200-7157-45d1-a233-90cbb22554aa', mount='[SWAP]'), | ||||
|   #                           PartitionInfo(device='/dev/sda', partition='/dev/sda4', mapper='/dev/mapper/main-root', uuid='b53ea5c3-8771-4717-9d3d-ef9c5b18bbe4', mount='/'), | ||||
|   #                           PartitionInfo(device='/dev/sda', partition='/dev/sda4', mapper='/dev/mapper/main-var', uuid='45aec3eb-68be-4eaa-bf79-de3f2a85c103', mount='/var'), | ||||
|   #                           PartitionInfo(device='/dev/sda', partition='/dev/sda4', mapper='/dev/mapper/main-audit', uuid='339ee49c-0e45-4510-8447-55f46f2a3653', mount='/var/log/audit'), | ||||
|   #                           PartitionInfo(device='/dev/sda', partition='/dev/sda4', mapper='/dev/mapper/main-tmp', uuid='b305d781-263f-4016-8422-301f61c11472', mount='/tmp'), | ||||
|   #                           PartitionInfo(device='/dev/sda', partition='/dev/sda4', mapper='/dev/mapper/main-opt', uuid='5e7cbfb8-760e-4526-90d5-ab103ae626a5', mount='/opt'), | ||||
|   #                           PartitionInfo(device='/dev/sda', partition='/dev/sda4', mapper='/dev/mapper/main-home', uuid='1b089fc0-f3a4-400b-955c-d3fa6b1e2a5f', mount='/home')], | ||||
|   #              '/dev/sdb': [PartitionInfo(device='/dev/sdb', partition=None, mapper=None, uuid=None, mount=None)]}) | ||||
|  | ||||
|   candidateDevs = [] | ||||
|   formattedDevs = [] | ||||
|  | ||||
|   # determine candidate storage devices: any disk that has no mount point associated with it | ||||
|   # in any way (no mounted partitions, mappings, etc.) and is at least 100 gigabytes in size | ||||
|   for device, entries in allDisks.items(): | ||||
|     deviceMounts = list(set([par.mount for par in entries if par.mount is not None])) | ||||
|     if (len(deviceMounts) == 0) and (GetDeviceSize(device) >= MINIMUM_CAPTURE_DEVICE_BYTES): | ||||
|       candidateDevs.append(device) | ||||
|  | ||||
|   # sort candidate devices largest to smallest | ||||
|   candidateDevs = sorted(candidateDevs, key=lambda x: GetDeviceSize(x), reverse=True) | ||||
|   if debug: eprint(f"Device candidates: {[(x, sizeof_fmt(GetDeviceSize(x))) for x in candidateDevs]}") | ||||
|  | ||||
|   if len(candidateDevs) > 0: | ||||
|  | ||||
|     if args.encrypt: | ||||
|       # create keyfile (will be on the encrypted system drive, and used to automatically unlock the encrypted capture drives) | ||||
|       with open(CAPTURE_CRYPT_KEYFILE, 'wb') as f: | ||||
|         f.write(os.urandom(4096)) | ||||
|       os.chown(CAPTURE_CRYPT_KEYFILE, 0, 0) | ||||
|       os.chmod(CAPTURE_CRYPT_KEYFILE, CAPTURE_CRYPT_KEYFILE_PERMS) | ||||
|  | ||||
|     # partition/format each candidate device | ||||
|     for device in candidateDevs: | ||||
|  | ||||
|       # we need at most two drives (one for PCAP, one for Zeek), though a single drive will do | ||||
|       if (len(formattedDevs) >= 2): break | ||||
|  | ||||
|       if (not args.interactive) or YesOrNo(f'Partition and format {device}{" (dry-run)" if args.dryrun else ""}?'): | ||||
|  | ||||
|         if args.dryrun: | ||||
|           eprint(f"Partitioning {device} (dry run only)...") | ||||
|           eprint(f'\t/sbin/parted --script --align optimal {device} -- mklabel gpt \\\n\t\tunit mib mkpart primary 1 100%') | ||||
|           ecode = 0 | ||||
|           partedOut = [] | ||||
|  | ||||
|         else: | ||||
|           # use parted to create a gpt partition table with a single partition consuming 100% of the disk minus one megabyte at the beginning | ||||
|           if debug: eprint(f"Partitioning {device}...") | ||||
|           ecode, partedOut = run_process(f'/sbin/parted --script --align optimal {device} -- mklabel gpt \\\n unit mib mkpart primary 1 100%', stdout=True, stderr=True, timeout=300) | ||||
|           if debug: eprint(partedOut) | ||||
|           if (ecode == 0): | ||||
|             if debug: eprint(f"Success partitioning {device}") | ||||
|  | ||||
|             # get the list of partitions from the newly partitioned device (should be just one) | ||||
|             _, fdiskOut = run_process(f'fdisk -l {device}') | ||||
|             pars = [] | ||||
|             parsList = False | ||||
|             for line in fdiskOut: | ||||
|               if debug: eprint(f"\t{line}") | ||||
|               if (not parsList) and fdisk_pars_begin_pattern.search(line): | ||||
|                 parsList = True | ||||
|               elif parsList: | ||||
|                 match = fdisk_par_pattern.search(line) | ||||
|                 if match is not None: | ||||
|                   pars.append(match.group('device')) | ||||
|  | ||||
|             if len(pars) == 1: | ||||
|  | ||||
|               parDev = pars[0] | ||||
|               parUuid = str(uuid.uuid4()) | ||||
|               parMapperDev = None | ||||
|               okToFormat = True | ||||
|  | ||||
|               if args.encrypt: | ||||
|                 okToFormat = False | ||||
|  | ||||
|                 # remove this device from /etc/crypttab | ||||
|                 if os.path.isfile(CRYPTTAB_FILE): | ||||
|                   with fileinput.FileInput(CRYPTTAB_FILE, inplace=True, backup='.bak') as f: | ||||
|                     for line in f: | ||||
|                       line = line.rstrip("\n") | ||||
|                       if line.startswith(f"{CreateMapperName(parDev)}"): | ||||
|                         if debug: eprint(f"removed {line} from {CRYPTTAB_FILE}") | ||||
|                       else: | ||||
|                         print(line) | ||||
|  | ||||
|                 _, reloadOut = run_process(f"systemctl daemon-reload") | ||||
|  | ||||
|                 # for good measure, run luksErase in case it was a previous luks volume | ||||
|                 if debug: eprint(f"Running cryptsetup luksErase on {parDev}...") | ||||
|                 _, cryptOut = run_process(f"/sbin/cryptsetup --verbose --batch-mode luksErase {parDev}", stdout=True, stderr=True, timeout=600) | ||||
|                 if debug: | ||||
|                   for line in cryptOut: | ||||
|                     eprint(f"\t{line}") | ||||
|  | ||||
|                 _, reloadOut = run_process(f"systemctl daemon-reload") | ||||
|  | ||||
|                 # luks volume creation: format the partition as a luks volume | ||||
|                 if debug: eprint(f"Running cryptsetup luksFormat on {parDev}...") | ||||
|                 ecode, cryptOut = run_process(f"/sbin/cryptsetup --verbose --batch-mode luksFormat {parDev} --uuid='{parUuid}' --key-file {CAPTURE_CRYPT_KEYFILE}", stdout=True, stderr=True, timeout=3600) | ||||
|                 if debug or (ecode != 0): | ||||
|                   for line in cryptOut: | ||||
|                     eprint(f"\t{line}") | ||||
|                 if (ecode == 0): | ||||
|  | ||||
|                   # open the luks volume in /dev/mapper/ | ||||
|                   if debug: eprint(f"Running cryptsetup luksOpen on {parDev}...") | ||||
|                   parMapperDev = CreateMapperDeviceName(parDev) | ||||
|                   ecode, cryptOut = run_process(f"/sbin/cryptsetup --verbose luksOpen {parDev} {CreateMapperName(parDev)} --key-file {CAPTURE_CRYPT_KEYFILE}", stdout=True, stderr=True, timeout=180) | ||||
|                   if debug or (ecode != 0): | ||||
|                     for line in cryptOut: | ||||
|                       eprint(f"\t{line}") | ||||
|                   if (ecode == 0): | ||||
|                     # we have everything we need for luks | ||||
|                     okToFormat = True | ||||
|  | ||||
|                   else: | ||||
|                     eprint(f"Error {ecode} opening LUKS on {parDev}, giving up on {device}") | ||||
|                 else: | ||||
|                   eprint(f"Error {ecode} formatting LUKS on {parDev}, giving up on {device}") | ||||
|  | ||||
|               # format the partition as an XFS file system | ||||
|               if okToFormat: | ||||
|                 if debug: eprint(f'Created {parDev}, assigning {parUuid}') | ||||
|                 if args.encrypt: | ||||
|                   formatCmd = f"/sbin/mkfs.xfs -f {parMapperDev}" | ||||
|                 else: | ||||
|                   formatCmd = f"/sbin/mkfs.xfs -f -m uuid='{parUuid}' {parDev}" | ||||
|                 if debug: eprint(f"Formatting: {formatCmd}") | ||||
|                 ecode, mkfsOut = run_process(formatCmd, stdout=True, stderr=True, timeout=3600) | ||||
|                 if debug: | ||||
|                   for line in mkfsOut: | ||||
|                     eprint(f"\t{line}") | ||||
|                 if (ecode == 0): | ||||
|                   eprint(f"Success formatting {parMapperDev if args.encrypt else parDev}") | ||||
|                   formattedDevs.append(PartitionInfo(device=device, partition=parDev, mapper=parMapperDev, uuid=parUuid, mount=None)) | ||||
|  | ||||
|                 else: | ||||
|                   eprint(f"Error {ecode} formatting {parMapperDev if args.encrypt else parDev}, giving up on {device}") | ||||
|  | ||||
|             else: | ||||
|               eprint(f"Error partitioning {device}, unexpected partitions after running parted, giving up on {device}") | ||||
|  | ||||
|           elif (ecode != 0): | ||||
|             eprint(f"Error {ecode} partitioning {device}, giving up on {device}") | ||||
|  | ||||
|     # now that we have formatted our device(s), decide where they're going to mount (these are already sorted) | ||||
|     if len(formattedDevs) >= 2: | ||||
|       formattedDevs[0].mount = os.path.join(CAPTURE_MOUNT_ROOT_PATH, CAPTURE_MOUNT_PCAP_DIR) | ||||
|       formattedDevs[1].mount = os.path.join(CAPTURE_MOUNT_ROOT_PATH, CAPTURE_MOUNT_ZEEK_DIR) | ||||
|  | ||||
|     elif len(formattedDevs) == 1: | ||||
|       formattedDevs[0].mount = CAPTURE_MOUNT_ROOT_PATH | ||||
|  | ||||
|     if debug: eprint(formattedDevs) | ||||
|  | ||||
|     # mountpoints are probably not already mounted, but this will make sure | ||||
|     run_process(f"umount {os.path.join(CAPTURE_MOUNT_ROOT_PATH, CAPTURE_MOUNT_PCAP_DIR)}") | ||||
|     run_process(f"umount {os.path.join(CAPTURE_MOUNT_ROOT_PATH, CAPTURE_MOUNT_ZEEK_DIR)}") | ||||
|     run_process(f"umount {CAPTURE_MOUNT_ROOT_PATH}") | ||||
|  | ||||
|     _, reloadOut = run_process(f"systemctl daemon-reload") | ||||
|  | ||||
|     # clean out any previous fstab entries that might be interfering from previous configurations | ||||
|     if Fstab.remove_by_mountpoint(os.path.join(CAPTURE_MOUNT_ROOT_PATH, CAPTURE_MOUNT_PCAP_DIR), path=FSTAB_FILE): | ||||
|       if debug: eprint(f"Removed previous {os.path.join(CAPTURE_MOUNT_ROOT_PATH, CAPTURE_MOUNT_PCAP_DIR)} mount from {FSTAB_FILE}") | ||||
|     if Fstab.remove_by_mountpoint(os.path.join(CAPTURE_MOUNT_ROOT_PATH, CAPTURE_MOUNT_ZEEK_DIR), path=FSTAB_FILE): | ||||
|       if debug: eprint(f"Removed previous {os.path.join(CAPTURE_MOUNT_ROOT_PATH, CAPTURE_MOUNT_ZEEK_DIR)} mount from {FSTAB_FILE}") | ||||
|     if Fstab.remove_by_mountpoint(CAPTURE_MOUNT_ROOT_PATH, path=FSTAB_FILE): | ||||
|       if debug: eprint(f"Removed previous {CAPTURE_MOUNT_ROOT_PATH} mount from {FSTAB_FILE}") | ||||
|  | ||||
|     # reload tab files with systemctl | ||||
|     _, reloadOut = run_process(f"systemctl daemon-reload") | ||||
|  | ||||
|     # get the GID of the group of the user(s) that will be doing the capture | ||||
|     try: | ||||
|       ecode, guidGetOut = run_process(f"getent group {CAPTURE_GROUP_OWNER}", stdout=True, stderr=True) | ||||
|       if (ecode == 0) and (len(guidGetOut) > 0): | ||||
|         netdevGuid = int(guidGetOut[0].split(':')[2]) | ||||
|       else: | ||||
|         netdevGuid = -1 | ||||
|     except: | ||||
|       netdevGuid = -1 | ||||
|  | ||||
|     # rmdir any mount directories that might be interfering from previous configurations | ||||
|     if os.path.isdir(CAPTURE_MOUNT_ROOT_PATH): | ||||
|       for root, dirs, files in os.walk(CAPTURE_MOUNT_ROOT_PATH, topdown=False): | ||||
|         for name in dirs: | ||||
|           if debug: eprint(f"Removing {os.path.join(root, name)}") | ||||
|           os.rmdir(os.path.join(root, name)) | ||||
|       if debug: eprint(f"Removing {CAPTURE_MOUNT_ROOT_PATH}") | ||||
|       os.rmdir(CAPTURE_MOUNT_ROOT_PATH) | ||||
|       if debug: eprint(f"Creating {CAPTURE_MOUNT_ROOT_PATH}") | ||||
|       os.makedirs(CAPTURE_MOUNT_ROOT_PATH, exist_ok=True) | ||||
|       os.chown(CAPTURE_MOUNT_ROOT_PATH, -1, netdevGuid) | ||||
|       os.chmod(CAPTURE_MOUNT_ROOT_PATH, CAPTURE_DIR_PERMS) | ||||
|  | ||||
|     # add crypttab entries | ||||
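|     # (each crypttab entry has the shape: <mapper-name> UUID=<partition-uuid> <keyfile-path> luks) | ||||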
|     if args.encrypt: | ||||
|       with open(CRYPTTAB_FILE, 'a' if os.path.isfile(CRYPTTAB_FILE) else 'w') as f: | ||||
|         for par in formattedDevs: | ||||
|           crypttabLine = f"{CreateMapperName(par.partition)} UUID={par.uuid} {CAPTURE_CRYPT_KEYFILE} luks\n" | ||||
|           f.write(crypttabLine) | ||||
|           if debug: eprint(f'Added "{crypttabLine}" to {CRYPTTAB_FILE}') | ||||
|  | ||||
|     # recreate mount directories and add fstab entries | ||||
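|     # each resulting fstab entry looks roughly like: | ||||
|     #   UUID=<uuid> <mountpoint> xfs defaults,inode64,noatime,rw,auto,user,x-systemd.device-timeout=600s 0 2 | ||||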
|     for par in formattedDevs: | ||||
|       if debug: eprint(f"Creating {par.mount}") | ||||
|       os.makedirs(par.mount, exist_ok=True) | ||||
|       if args.encrypt: | ||||
|         entry = Fstab.add(device=f"{par.mapper}", mountpoint=par.mount, options=f"defaults,inode64,noatime,rw,auto,user,x-systemd.device-timeout=600s", fs_passno=2, filesystem='xfs', path=FSTAB_FILE) | ||||
|       else: | ||||
|         entry = Fstab.add(device=f"UUID={par.uuid}", mountpoint=par.mount, options=f"defaults,inode64,noatime,rw,auto,user,x-systemd.device-timeout=600s", fs_passno=2, filesystem='xfs', path=FSTAB_FILE) | ||||
|       eprint(f'Added "{entry}" to {FSTAB_FILE} for {par.partition}') | ||||
|  | ||||
|     # reload tab files with systemctl | ||||
|     _, reloadOut = run_process(f"systemctl daemon-reload") | ||||
|  | ||||
|     # mount the partitions and create a directory with user permissions | ||||
|     for par in formattedDevs: | ||||
|  | ||||
|       ecode, mountOut = run_process(f"mount {par.mount}") | ||||
|       if (ecode == 0): | ||||
|         if debug: eprint(f'Mounted {par.partition} at {par.mount}') | ||||
|  | ||||
|         userDirs = [] | ||||
|         if par.mount == CAPTURE_MOUNT_ROOT_PATH: | ||||
|           # only one drive, so we're mounted at /capture, create user directories for CAPTURE_MOUNT_ZEEK_DIR and CAPTURE_MOUNT_PCAP_DIR | ||||
|           userDirs.append(os.path.join(par.mount, CAPTURE_MOUNT_PCAP_DIR)) | ||||
|           userDirs.append(os.path.join(par.mount, CAPTURE_MOUNT_ZEEK_DIR)) | ||||
|         else: | ||||
|           # we're mounted somewhere *underneath* /capture, so create a user-writeable subdirectory where we are | ||||
|           userDirs.append(os.path.join(par.mount, 'capture')) | ||||
|  | ||||
|         # set permissions on user dirs | ||||
|         pcapDir = None | ||||
|         zeekDir = None | ||||
|         for userDir in userDirs: | ||||
|           os.makedirs(userDir, exist_ok=True) | ||||
|           os.chown(userDir, CAPTURE_USER_UID, netdevGuid) | ||||
|           os.chmod(userDir, CAPTURE_SUBDIR_PERMS) | ||||
|           if debug: eprint(f'Created "{userDir}" for writing by capture user') | ||||
|           if f"{os.path.sep}{CAPTURE_MOUNT_PCAP_DIR}{os.path.sep}" in userDir: | ||||
|             pcapDir = userDir | ||||
|           elif f"{os.path.sep}{CAPTURE_MOUNT_ZEEK_DIR}{os.path.sep}" in userDir: | ||||
|             zeekDir = userDir | ||||
|  | ||||
|         # replace capture paths in-place in SENSOR_CAPTURE_CONFIG | ||||
|         if os.path.isfile(SENSOR_CAPTURE_CONFIG): | ||||
|           capture_re = re.compile(r"\b(?P<key>PCAP_PATH|ZEEK_LOG_PATH)\s*=\s*.*?$") | ||||
|           with fileinput.FileInput(SENSOR_CAPTURE_CONFIG, inplace=True, backup='.bak') as f: | ||||
|             for line in f: | ||||
|               line = line.rstrip("\n") | ||||
|               log_path_match = capture_re.search(line) | ||||
|               if (log_path_match is not None): | ||||
|                 if (log_path_match.group('key') == 'PCAP_PATH') and (pcapDir is not None): | ||||
|                   print(capture_re.sub(r"\1=%s" % pcapDir, line)) | ||||
|                 elif (log_path_match.group('key') == 'ZEEK_LOG_PATH') and (zeekDir is not None): | ||||
|                   print(capture_re.sub(r"\1=%s" % zeekDir, line)) | ||||
|                 else: | ||||
|                   print(line) | ||||
|               else: | ||||
|                 print(line) | ||||
|  | ||||
|       else: | ||||
|         eprint(f"Error {ecode} mounting {par.partition}") | ||||
|  | ||||
|   else: | ||||
|     eprint("Could not find any unmounted storage devices larger than 100GB, giving up") | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|   main() | ||||
|  | ||||
|  | ||||
104 Vagrant/resources/malcolm/shared/bin/sensor-init.sh (Executable file)
| @@ -0,0 +1,104 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| SCRIPT_PATH="$(dirname $(realpath -e "${BASH_SOURCE[0]}"))" | ||||
|  | ||||
| echo "sensor" > /etc/installer | ||||
|  | ||||
| MAIN_USER="$(id -nu 1000)" | ||||
|  | ||||
| if [[ -r "$SCRIPT_PATH"/common-init.sh ]]; then | ||||
|   . "$SCRIPT_PATH"/common-init.sh | ||||
|  | ||||
|   # remove default accounts/groups we don't want; create/set directories for system (non-login) accounts so STIG checks don't complain | ||||
|   CleanDefaultAccounts | ||||
|  | ||||
|   # get a list of the hardware interfaces | ||||
|   PopulateInterfaces | ||||
|  | ||||
|   # set up some sensor-specific stuff | ||||
|   if [[ -d /opt/sensor ]]; then | ||||
|  | ||||
|     # set ownership for /opt/sensor files for sensor UID:GID | ||||
|     chown -R 1000:1000 /opt/sensor | ||||
|     find /opt/sensor/ -type d -exec chmod 750 "{}" \; | ||||
|     find /opt/sensor/ -type f -exec chmod 640 "{}" \; | ||||
|     find /opt/sensor/ -type f -name "*.sh" -exec chmod 750 "{}" \; | ||||
|     find /opt/sensor/ -type f -name "*.keystore" -exec chmod 600 "{}" \; | ||||
|  | ||||
|     if [[ -f /opt/sensor/sensor_ctl/control_vars.conf ]]; then | ||||
|       # if the capture interface hasn't been set in control_vars.conf, set it now | ||||
|       if grep --quiet CAPTURE_INTERFACE=xxxx /opt/sensor/sensor_ctl/control_vars.conf; then | ||||
|         CAP_IFACE="$(DetermineCaptureInterface)" | ||||
|         if [[ -n "${CAP_IFACE}" ]]; then | ||||
|           sed -i "s/CAPTURE_INTERFACE=xxxx/CAPTURE_INTERFACE=${CAP_IFACE}/g" /opt/sensor/sensor_ctl/control_vars.conf | ||||
|         fi | ||||
|       fi | ||||
|       chmod 600 /opt/sensor/sensor_ctl/control_vars.conf* | ||||
|     fi | ||||
|  | ||||
|     [[ -f /opt/sensor/sensor_ctl/moloch/config.ini ]] && chmod 600 /opt/sensor/sensor_ctl/moloch/config.ini | ||||
|  | ||||
|   fi | ||||
|  | ||||
|   # zeekctl won't run as a non-root user unless the entire zeek tree is owned by that user | ||||
|   if [[ -d /opt/zeek.orig ]]; then | ||||
|     # as such, we're going to reset zeek to a "clean" state after each reboot. the config files will get | ||||
|     # regenerated when we are about to deploy zeek itself | ||||
|     [[ -d /opt/zeek ]] && rm -rf /opt/zeek | ||||
|     rsync -a /opt/zeek.orig/ /opt/zeek | ||||
|   fi | ||||
|   if [[ -d /opt/zeek ]]; then | ||||
|     chown -R 1000:1000 /opt/zeek/* | ||||
|     [[ -f /opt/zeek/bin/zeek ]] && setcap 'CAP_NET_RAW+eip CAP_NET_ADMIN+eip' /opt/zeek/bin/zeek | ||||
|   fi | ||||
|   if [[ -d /opt/yara-rules ]]; then | ||||
|     mkdir -p /opt/yara-rules/custom | ||||
|     chown -R 1000:1000 /opt/yara-rules/custom | ||||
|     chmod -R 750 /opt/yara-rules/custom | ||||
|   fi | ||||
|  | ||||
|   # if the sensor needs to do clamav scanning, configure it to run as the sensor user | ||||
|   if dpkg -s clamav >/dev/null 2>&1 ; then | ||||
|     mkdir -p /var/log/clamav /var/lib/clamav | ||||
|     chown -R 1000:1000 /var/log/clamav  /var/lib/clamav | ||||
|     chmod -R 750 /var/log/clamav  /var/lib/clamav | ||||
|     sed -i 's/^Foreground .*$/Foreground true/g' /etc/clamav/freshclam.conf | ||||
|     sed -i 's/^Foreground .*$/Foreground true/g' /etc/clamav/clamd.conf | ||||
|     if [[ -d /opt/sensor/sensor_ctl ]]; then | ||||
|       # disable clamd/freshclam logfiles as supervisord will handle the logging from STDOUT instead | ||||
|       sed -i 's@^UpdateLogFile .*$@#UpdateLogFile /var/log/clamav/freshclam.log@g' /etc/clamav/freshclam.conf | ||||
|       sed -i 's@^LogFile .*$@#LogFile /var/log/clamav/clamd.log@g' /etc/clamav/clamd.conf | ||||
|       # use local directory for socket file | ||||
|       mkdir -p /opt/sensor/sensor_ctl/clamav | ||||
|       chown -R 1000:1000 /opt/sensor/sensor_ctl/clamav | ||||
|       chmod -R 750 /opt/sensor/sensor_ctl/clamav | ||||
|       sed -i 's@^LocalSocket .*$@LocalSocket /opt/sensor/sensor_ctl/clamav/clamd.ctl@g' /etc/clamav/clamd.conf | ||||
|     fi | ||||
|     if [[ -n $MAIN_USER ]]; then | ||||
|       sed -i "s/^User .*$/User $MAIN_USER/g" /etc/clamav/clamd.conf | ||||
|       sed -i "s/^LocalSocketGroup .*$/LocalSocketGroup $MAIN_USER/g" /etc/clamav/clamd.conf | ||||
|       sed -i "s/^DatabaseOwner .*$/DatabaseOwner $MAIN_USER/g" /etc/clamav/freshclam.conf | ||||
|     fi | ||||
|     [[ -r /opt/sensor/sensor_ctl/control_vars.conf ]] && source /opt/sensor/sensor_ctl/control_vars.conf | ||||
|     [[ -z $EXTRACTED_FILE_MAX_BYTES ]] && EXTRACTED_FILE_MAX_BYTES=134217728 | ||||
|     sed -i "s/^MaxFileSize .*$/MaxFileSize $EXTRACTED_FILE_MAX_BYTES/g" /etc/clamav/clamd.conf | ||||
|     sed -i "s/^MaxScanSize .*$/MaxScanSize $(echo "$EXTRACTED_FILE_MAX_BYTES * 4" | bc)/g" /etc/clamav/clamd.conf | ||||
|     grep -q "^TCPSocket" /etc/clamav/clamd.conf && (sed -i 's/^TCPSocket .*$/TCPSocket 3310/g' /etc/clamav/clamd.conf) || (echo "TCPSocket 3310" >> /etc/clamav/clamd.conf) | ||||
|   fi | ||||
|  | ||||
|   # if the network configuration files for the interfaces haven't been set to come up on boot, configure that now. | ||||
|   InitializeSensorNetworking | ||||
|  | ||||
|   # fix some permissions to make sure things belong to the right person | ||||
|   [[ -n $MAIN_USER ]] && FixPermissions "$MAIN_USER" | ||||
|  | ||||
|   # block some call-homes | ||||
|   BadTelemetry | ||||
|  | ||||
|   exit 0 | ||||
| else | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
243 Vagrant/resources/malcolm/shared/bin/sensorcommon.py (Normal file)
| @@ -0,0 +1,243 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| import argparse | ||||
| import ipaddress | ||||
| import json | ||||
| import os | ||||
| import socket | ||||
| import ssl | ||||
| import subprocess | ||||
| import sys | ||||
| import urllib.request | ||||
|  | ||||
| from base64 import b64encode | ||||
| from bs4 import BeautifulSoup | ||||
| from bs4.element import Comment | ||||
| from contextlib import closing | ||||
| from http.client import HTTPSConnection, HTTPConnection | ||||
| from multiprocessing import RawValue | ||||
| from threading import Lock | ||||
|  | ||||
| NIC_BLINK_SECONDS = 10 | ||||
|  | ||||
| ################################################################################################### | ||||
| class CancelledError(Exception): | ||||
|   """Raised when user cancels the operation""" | ||||
|   pass | ||||
|  | ||||
| ################################################################################################### | ||||
| class Iface: | ||||
|   def __init__(self, name, description): | ||||
|     self.name = name | ||||
|     self.description = description | ||||
|  | ||||
| ################################################################################################### | ||||
| # clear the terminal window and exit the script | ||||
| def clearquit(): | ||||
|   os.system('clear') | ||||
|   sys.exit(0) | ||||
|  | ||||
| ################################################################################################### | ||||
| # print to stderr | ||||
| def eprint(*args, **kwargs): | ||||
|   print(*args, file=sys.stderr, **kwargs) | ||||
|  | ||||
| ################################################################################################### | ||||
| # urlencode each character of a string | ||||
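| # e.g. aggressive_url_encode("ab") returns "%61%62" | ||||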
| def aggressive_url_encode(string): | ||||
|   return "".join("%{0:0>2}".format(format(ord(char), "x")) for char in string) | ||||
|  | ||||
| ################################################################################################### | ||||
| # strip a prefix from the beginning of a string if needed | ||||
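| # e.g. remove_prefix("/dev/sda", "/dev/") returns "sda" | ||||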
| def remove_prefix(text, prefix): | ||||
|   if (len(prefix) > 0) and text.startswith(prefix): | ||||
|     return text[len(prefix):] | ||||
|   else: | ||||
|     return text | ||||
|  | ||||
| ################################################################################################### | ||||
| # nice human-readable file sizes | ||||
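| # e.g. sizeof_fmt(1536) returns "1.5KiB", sizeof_fmt(3 * 1024**3) returns "3.0GiB" | ||||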
| def sizeof_fmt(num, suffix='B'): | ||||
|   for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']: | ||||
|     if abs(num) < 1024.0: | ||||
|       return "%3.1f%s%s" % (num, unit, suffix) | ||||
|     num /= 1024.0 | ||||
|   return "%.1f%s%s" % (num, 'Yi', suffix) | ||||
|  | ||||
| ################################################################################################### | ||||
| # convenient boolean argument parsing | ||||
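| # e.g. parser.add_argument('-v', dest='debug', type=str2bool, nargs='?', const=True, default=False) | ||||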
| def str2bool(v): | ||||
|   if v.lower() in ('yes', 'true', 't', 'y', '1'): | ||||
|     return True | ||||
|   elif v.lower() in ('no', 'false', 'f', 'n', '0'): | ||||
|     return False | ||||
|   else: | ||||
|     raise argparse.ArgumentTypeError('Boolean value expected.') | ||||
|  | ||||
| ################################################################################################### | ||||
| # will it float? | ||||
| def isfloat(value): | ||||
|   try: | ||||
|     float(value) | ||||
|     return True | ||||
|   except ValueError: | ||||
|     return False | ||||
|  | ||||
| ################################################################################################### | ||||
| # check a string or list to see if something is a valid IP address | ||||
| def isipaddress(value): | ||||
|   result = True | ||||
|   try: | ||||
|     if isinstance(value, (list, tuple, set)): | ||||
|       for v in value: | ||||
|         ip = ipaddress.ip_address(v) | ||||
|     else: | ||||
|       ip = ipaddress.ip_address(value) | ||||
|   except: | ||||
|     result = False | ||||
|   return result | ||||
|  | ||||
| ################################################################################################### | ||||
| # execute a shell process returning its exit code and output | ||||
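| # e.g. ecode, output = run_process("uname -r") might yield (0, ["5.10.0-8-amd64"]) | ||||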
| def run_process(command, stdout=True, stderr=False, stdin=None, timeout=60): | ||||
|   retcode = -1 | ||||
|   output = [] | ||||
|   try: | ||||
|     p = subprocess.run([command], input=stdin, universal_newlines=True, capture_output=True, shell=True, timeout=timeout) | ||||
|   except subprocess.TimeoutExpired: | ||||
|     # treat a timed-out command like a failed one (retcode stays -1) | ||||
|     p = None | ||||
|   if p: | ||||
|     retcode = p.returncode | ||||
|     if stderr and p.stderr: | ||||
|       output.extend(p.stderr.splitlines()) | ||||
|     if stdout and p.stdout: | ||||
|       output.extend(p.stdout.splitlines()) | ||||
|  | ||||
|   return retcode, output | ||||
|  | ||||
|  | ||||
| def tag_visible(element): | ||||
|   if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']: | ||||
|     return False | ||||
|   if isinstance(element, Comment): | ||||
|     return False | ||||
|   return True | ||||
|  | ||||
| def text_from_html(body): | ||||
|   soup = BeautifulSoup(body, 'html.parser') | ||||
|   texts = soup.findAll(text=True) | ||||
|   visible_texts = filter(tag_visible, texts) | ||||
|   return u" ".join(t.strip() for t in visible_texts).splitlines() | ||||
|  | ||||
| ################################################################################################### | ||||
| # test a connection to an HTTP/HTTPS server | ||||
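| # e.g. status, message, output = test_connection(protocol="https", host="localhost", port=443, ssl_verify="none") | ||||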
| def test_connection(protocol="http", host="127.0.0.1", port=80, uri="", username=None, password=None, ssl_verify="full", user_agent="hedgehog"): | ||||
|   status = 400 | ||||
|   message = "Connection error" | ||||
|   output = [] | ||||
|  | ||||
|   if protocol.lower() == "https": | ||||
|     if ssl_verify.lower() == "full": | ||||
|       c = HTTPSConnection(host, port=port) | ||||
|     else: | ||||
|       c = HTTPSConnection(host, port=port, context=ssl._create_unverified_context()) | ||||
|   elif protocol.lower() == "http": | ||||
|     c = HTTPConnection(host, port=port) | ||||
|   else: | ||||
|     c = None | ||||
|  | ||||
|   if c: | ||||
|     try: | ||||
|       if username and password: | ||||
|         c.request('GET', f'/{uri}', headers={ 'User-agent': user_agent, 'Authorization' : 'Basic %s' %  b64encode(f"{username}:{password}".encode()).decode("ascii") }) | ||||
|       else: | ||||
|         c.request('GET', f'/{uri}', headers={ 'User-agent': user_agent }) | ||||
|       res = c.getresponse() | ||||
|       status = res.status | ||||
|       message = res.reason | ||||
|       output = text_from_html(res.read()) | ||||
|  | ||||
|     except Exception as e: | ||||
|       if len(output) == 0: | ||||
|         output = ["Error: {}".format(e)] | ||||
|  | ||||
|   return status, message, output | ||||
|  | ||||
| ################################################################################################### | ||||
| # test if a remote port is open | ||||
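| # e.g. check_socket("127.0.0.1", 9200) returns True if something is listening there | ||||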
| def check_socket(host, port): | ||||
|   with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock: | ||||
|     sock.settimeout(10) | ||||
|     if sock.connect_ex((host, port)) == 0: | ||||
|       return True | ||||
|     else: | ||||
|       return False | ||||
|  | ||||
| ################################################################################################### | ||||
| # determine a list of available (non-virtual) adapters (Iface's) | ||||
| def get_available_adapters(): | ||||
|  | ||||
|   available_adapters = [] | ||||
|   _, all_iface_list = run_process("find /sys/class/net/ -mindepth 1 -maxdepth 1 -type l -printf '%P %l\\n'") | ||||
|   available_iface_list = [x.split(" ", 1)[0] for x in all_iface_list if 'virtual' not in x] | ||||
|  | ||||
|   # for each adapter, determine its MAC address and link speed | ||||
|   for adapter in available_iface_list: | ||||
|     mac_address = '??:??:??:??:??:??' | ||||
|     speed = '?' | ||||
|     try: | ||||
|       with open(f"/sys/class/net/{adapter}/address", 'r') as f: | ||||
|         mac_address = f.readline().strip() | ||||
|     except: | ||||
|       pass | ||||
|     try: | ||||
|       with open(f"/sys/class/net/{adapter}/speed", 'r') as f: | ||||
|         speed = f.readline().strip() | ||||
|     except: | ||||
|       pass | ||||
|     description = f"{mac_address} ({speed} Mbits/sec)" | ||||
|     iface = Iface(adapter, description) | ||||
|     available_adapters.append(iface) | ||||
|  | ||||
|   return available_adapters | ||||
|  | ||||
| ################################################################################################### | ||||
| # identify the specified adapter using ethtool --identify | ||||
| def identify_adapter(adapter, duration=NIC_BLINK_SECONDS, background=False): | ||||
|   if background: | ||||
|     subprocess.Popen(["/sbin/ethtool", "--identify", adapter, str(duration)], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) | ||||
|   else: | ||||
|     retCode, _ = run_process(f"/sbin/ethtool --identify {adapter} {duration}", stdout=False, stderr=False, timeout=duration*2) | ||||
|     return (retCode == 0) | ||||
|  | ||||
| ################################################################################################### | ||||
| # client that writes to the local instance of protologbeat listening on the configured host/port/protocol | ||||
| class HeatBeatLogger: | ||||
|  | ||||
|   def __init__(self, host='127.0.0.1', port=9515, proto='udp', format='plain', debug=False): | ||||
|     self.host = host | ||||
|     self.port = port | ||||
|     if proto == 'udp': | ||||
|       self.proto = 'udp' | ||||
|       self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) | ||||
|     else: | ||||
|       self.proto = 'tcp' | ||||
|       self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) | ||||
|       # a SOCK_STREAM socket must be connected before it can send | ||||
|       self.socket.connect((self.host, self.port)) | ||||
|     self.format = format | ||||
|     if self.format not in ['plain','json']: | ||||
|       self.format = 'plain' | ||||
|     self.debug = debug | ||||
|     if self.debug: | ||||
|       print("Creating instance of logger via {} on {}:{}".format(self.proto, self.host, self.port)) | ||||
|  | ||||
|   def enable_debug(self): | ||||
|     self.debug = True | ||||
|  | ||||
|   def send_message(self, msg): | ||||
|     if self.format == 'json': | ||||
|       payload = json.dumps(msg) | ||||
|     else: | ||||
|       payload = msg | ||||
|     if self.debug: | ||||
|       print("Sending message: {}".format(payload.encode('utf-8'))) | ||||
|     if self.proto == 'tcp': | ||||
|       self.socket.send(payload.encode('utf-8')) | ||||
|     else: | ||||
|       self.socket.sendto(payload.encode('utf-8'), (self.host, self.port)) | ||||
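|  | ||||
| # minimal usage sketch, assuming a protologbeat listener on the default 127.0.0.1:9515/udp: | ||||
| #   logger = HeatBeatLogger(format='json') | ||||
| #   logger.send_message({"temperature": 42.0}) | ||||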
139 Vagrant/resources/malcolm/shared/bin/sensormetric.py (Normal file)
| @@ -0,0 +1,139 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| import subprocess | ||||
| import socket | ||||
| import string | ||||
| from sensorcommon import * | ||||
|  | ||||
| LABEL_HDD = "Storage device" | ||||
| HDDTEMP_PORT_DEFAULT = 7634 | ||||
| HDDTEMP_INTERFACE_IP = "127.0.0.1" | ||||
|  | ||||
| class Metric(object): | ||||
|  | ||||
|   def __init__(self, adapter_id, sensor_id, sensor_key, value, label): | ||||
|     self._value = self.parse_value(value) | ||||
|     self._adapter_id = adapter_id | ||||
|     self._sensor_id = sensor_id | ||||
|     self._sensor_key = sensor_key | ||||
|     self._label = label | ||||
|     if (label.startswith('Core') or | ||||
|         label.startswith('Processor') or | ||||
|         ((label.startswith('Physical') or label.startswith('Package')) and adapter_id.startswith('core'))): | ||||
|       self._label_class = "cpu" | ||||
|     elif LABEL_HDD in label: | ||||
|       self._label_class = "hdd" | ||||
|     elif "GPU" in label: | ||||
|       self._label_class = "gpu" | ||||
|     elif "DIMM" in label: | ||||
|       self._label_class = "memory" | ||||
|     else: | ||||
|       self._label_class = "other" | ||||
|  | ||||
|   @classmethod | ||||
|   def parse_value(cls, value): | ||||
|     if hasattr(cls, "parse"): | ||||
|       parse = getattr(cls, "parse") | ||||
|       return parse(value) | ||||
|     else: | ||||
|       return value | ||||
|  | ||||
|   def to_dictionary(self): | ||||
|     return { | ||||
|       "name": self._sensor_id, | ||||
|       "adapter": self._adapter_id, | ||||
|       "value": self._value, | ||||
|       "value_type": self.parse.__name__, | ||||
|       "units": getattr(self, "unit", "?"), | ||||
|       "label": self._label, | ||||
|       "class": "%s%s" % (self._label_class, getattr(self, "suffix", "")) | ||||
|     } | ||||
|  | ||||
|   def __repr__(self): | ||||
|     return "%s, %s, %s: %s %s [%s]" % ( | ||||
|       self._adapter_id, | ||||
|       self._sensor_id, | ||||
|       self._sensor_key, | ||||
|       self._value, | ||||
|       getattr(self, "unit", "?"), | ||||
|       self._label) | ||||
|  | ||||
| class TemperatureMetric(Metric): | ||||
|   parse = float | ||||
|   unit = "°C" | ||||
|   suffix = "_temp" | ||||
|  | ||||
| class FanMetric(Metric): | ||||
|   parse = float | ||||
|   unit = "RPM" | ||||
|   suffix = "_rpm" | ||||
|  | ||||
| class VoltageMetric(Metric): | ||||
|   parse = float | ||||
|   unit = "V" | ||||
|   suffix = "_volt" | ||||
|  | ||||
| def metric_cleanup(): | ||||
|   pass | ||||
|  | ||||
| def get_metrics_list(HddTempHost=HDDTEMP_INTERFACE_IP, HddTempPort=HDDTEMP_PORT_DEFAULT): | ||||
|  | ||||
|   # lm-sensors values | ||||
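|   # each "sensors -u" section looks roughly like this (hedged example): | ||||
|   #   coretemp-isa-0000 | ||||
|   #   Adapter: ISA adapter | ||||
|   #   Core 0: | ||||
|   #     temp2_input: 41.000 | ||||
|   #     temp2_max: 81.000 | ||||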
|   try: | ||||
|     output = subprocess.check_output(["/usr/bin/sensors", "-u"], stderr=subprocess.DEVNULL).decode("utf-8").strip() | ||||
|   except Exception as e: | ||||
|     eprint(e) | ||||
|     output = "" | ||||
|   sections = output.split("\n\n") | ||||
|  | ||||
|   metrics = [] | ||||
|   for section in sections: | ||||
|     fields = section.split("\n") | ||||
|     adapter_id = fields[0] | ||||
|  | ||||
|     label = None | ||||
|     for field in fields[2:]: | ||||
|       if field.startswith("  "): | ||||
|         field = field.replace("  ", "") | ||||
|         field_key, field_value = field.split(": ") | ||||
|         if "_" in field_key: | ||||
|           sensor_id, sensor_key = field_key.split("_", 1) | ||||
|           if sensor_key == "input": | ||||
|             if sensor_id.startswith("temp"): | ||||
|               metrics.append(TemperatureMetric(adapter_id, sensor_id, sensor_key, field_value, label=label)) | ||||
|             elif sensor_id.startswith("in"): | ||||
|               metrics.append(VoltageMetric(adapter_id, sensor_id, sensor_key, field_value, label=label)) | ||||
|             elif sensor_id.startswith("fan"): | ||||
|               metrics.append(FanMetric(adapter_id, sensor_id, sensor_key, field_value, label=label)) | ||||
|       else: | ||||
|         label = field[:-1] # strip off trailing ":" character | ||||
|  | ||||
|  | ||||
|   # connect to hddtemp daemon for HDD temperature monitoring | ||||
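|   # hddtemp's TCP interface returns all drives as one "|"-delimited stream, roughly (model name is a hypothetical example): | ||||
|   #   |/dev/sda|ExampleDiskModel|36|C||/dev/sdb|ExampleDiskModel|35|C| | ||||
|   # i.e. |device|model|temperature|unit| per drive, with drives separated by "||" | ||||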
|   with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: | ||||
|     try: | ||||
|       try: | ||||
|         s.connect((HddTempHost, HddTempPort)) | ||||
|         hdd_temp_line = "" | ||||
|         data = s.recv(4096) | ||||
|         while data: | ||||
|           hdd_temp_line += data.decode('latin-1') | ||||
|           data = s.recv(4096) | ||||
|         for hdd_stats in [x.split('|') for x in hdd_temp_line.strip('|').split('||')]: | ||||
|           if (len(hdd_stats) == 4) and isfloat(hdd_stats[2]): | ||||
|             metrics.append(TemperatureMetric(' '.join(''.join(filter(lambda x: x in string.printable, hdd_stats[1])).split()), | ||||
|                                              hdd_stats[0], | ||||
|                                              'input', | ||||
|                                              hdd_stats[2], | ||||
|                                              label=LABEL_HDD)) | ||||
|       except Exception as e: | ||||
|         eprint(e) | ||||
|     finally: | ||||
|       # shutdown fails if the connection was never established; the "with" block handles closing | ||||
|       try: | ||||
|         s.shutdown(socket.SHUT_RDWR) | ||||
|       except OSError: | ||||
|         pass | ||||
|  | ||||
|   return metrics | ||||
62 Vagrant/resources/malcolm/shared/bin/ufw_allow_viewer.sh (Normal file)
| @@ -0,0 +1,62 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| # manage a UFW rule for allowing a remote Arkime viewer instance (on the same host | ||||
| # to which moloch-capture is forwarding session logs) to connect to and | ||||
| # retrieve PCAP segments from the local Arkime viewer instance | ||||
|  | ||||
| # works with a comma-separated list of IP addresses in $ARKIME_PACKET_ACL, or | ||||
| # if that variable is not set, a single IP address in $ES_HOST | ||||
|  | ||||
| [[ "$(uname -s)" = 'Darwin' ]] && REALPATH=grealpath || REALPATH=realpath | ||||
| [[ "$(uname -s)" = 'Darwin' ]] && DIRNAME=gdirname || DIRNAME=dirname | ||||
| if ! (type "$REALPATH" && type "$DIRNAME") > /dev/null; then | ||||
|   echo "$(basename "${BASH_SOURCE[0]}") requires $REALPATH and $DIRNAME" | ||||
|   exit 1 | ||||
| fi | ||||
| export SCRIPT_PATH="$($DIRNAME $($REALPATH -e "${BASH_SOURCE[0]}"))" | ||||
|  | ||||
| # control_vars.conf file must be specified as argument to script or be found in an expected place | ||||
| # source configuration variables file if found (precedence: pwd, script directory, /opt/sensor/sensor_ctl) | ||||
| if [[ -n "$1" ]]; then | ||||
|   source "$1" | ||||
| else | ||||
|   CONTROL_VARS_FILE="control_vars.conf" | ||||
|   if [[ -r ./"$CONTROL_VARS_FILE" ]]; then | ||||
|     source ./"$CONTROL_VARS_FILE" | ||||
|   elif [[ -r "$SCRIPT_PATH"/"$CONTROL_VARS_FILE" ]]; then | ||||
|     source "$SCRIPT_PATH"/"$CONTROL_VARS_FILE" | ||||
|   elif [[ -r /opt/sensor/sensor_ctl/"$CONTROL_VARS_FILE" ]]; then | ||||
|     source /opt/sensor/sensor_ctl/"$CONTROL_VARS_FILE" | ||||
|   fi | ||||
| fi | ||||
|  | ||||
| if [[ -z $ARKIME_VIEWER_PORT ]] || ( [[ -z $ARKIME_PACKET_ACL ]] && [[ -z $ES_HOST ]] ); then | ||||
|   echo "Either the remote Arkime viewer host (\$ARKIME_PACKET_ACL or \$ES_HOST) or the local Arkime viewer port (\$ARKIME_VIEWER_PORT) is undefined" | ||||
|   exit 1 | ||||
| elif [[ ! -x /usr/sbin/ufw ]]; then | ||||
|   echo "/usr/sbin/ufw does not exist or is not executable" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| # delete previous UFW rule(s) | ||||
| while read LINE; do | ||||
|   if [[ -n $LINE ]] && [[ "$LINE" =~ ^[0-9]+$ ]]; then | ||||
|     /usr/sbin/ufw --force delete $LINE | ||||
|   fi | ||||
| done <<< "$(/usr/sbin/ufw status numbered | tac | grep "${ARKIME_VIEWER_PORT}/tcp" | sed "s/].*//" | sed "s/[^0-9]*//g")" | ||||
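| # (the numbered rule list is reversed with tac so deleting a rule doesn't renumber the ones still pending deletion) | ||||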
|  | ||||
| # add new UFW rule(s) | ||||
| if [[ -n $ARKIME_PACKET_ACL ]]; then | ||||
|   IFS="," | ||||
|   for IP in $ARKIME_PACKET_ACL; do | ||||
|     /usr/sbin/ufw allow proto tcp from $IP to any port $ARKIME_VIEWER_PORT | ||||
|   done | ||||
|   unset IFS | ||||
| elif [[ -n $ES_HOST ]]; then | ||||
|   /usr/sbin/ufw allow proto tcp from $ES_HOST to any port $ARKIME_VIEWER_PORT | ||||
| fi | ||||
|  | ||||
| # output status of rule | ||||
| /usr/sbin/ufw status | grep "${ARKIME_VIEWER_PORT}/tcp" | ||||
285 Vagrant/resources/malcolm/shared/bin/zeek_carve_logger.py (Executable file)
| @@ -0,0 +1,285 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| ################################################################################################### | ||||
| # Monitor a directory for files extracted by zeek for processing | ||||
| # | ||||
| # Run the script with --help for options | ||||
| ################################################################################################### | ||||
|  | ||||
| import argparse | ||||
| import json | ||||
| import os | ||||
| import pathlib | ||||
| import re | ||||
| import shutil | ||||
| import signal | ||||
| import sys | ||||
| import time | ||||
| import zmq | ||||
|  | ||||
| from collections import defaultdict | ||||
| from contextlib import nullcontext | ||||
| from datetime import datetime | ||||
| from zeek_carve_utils import * | ||||
|  | ||||
| ################################################################################################### | ||||
| debug = False | ||||
| verboseDebug = False | ||||
| debugToggled = False | ||||
| pdbFlagged = False | ||||
| args = None | ||||
| scriptName = os.path.basename(__file__) | ||||
| scriptPath = os.path.dirname(os.path.realpath(__file__)) | ||||
| origPath = os.getcwd() | ||||
| shuttingDown = False | ||||
|  | ||||
| ################################################################################################### | ||||
| # handle sigint/sigterm and set a global shutdown variable | ||||
| def shutdown_handler(signum, frame): | ||||
|   global shuttingDown | ||||
|   shuttingDown = True | ||||
|  | ||||
| ################################################################################################### | ||||
| # handle sigusr1 for a pdb breakpoint | ||||
| def pdb_handler(sig, frame): | ||||
|   global pdbFlagged | ||||
|   pdbFlagged = True | ||||
|  | ||||
| ################################################################################################### | ||||
| # handle sigusr2 for toggling debug | ||||
| def debug_toggle_handler(signum, frame): | ||||
|   global debug | ||||
|   global debugToggled | ||||
|   debug = not debug | ||||
|   debugToggled = True | ||||
|  | ||||
| ################################################################################################### | ||||
| # | ||||
| def same_file_or_dir(path1, path2): | ||||
|   try: | ||||
|     return os.path.samefile(path1, path2) | ||||
|   except: | ||||
|     return False | ||||
|  | ||||
| ################################################################################################### | ||||
| # main | ||||
| def main(): | ||||
|   global args | ||||
|   global debug | ||||
|   global verboseDebug | ||||
|   global debugToggled | ||||
|   global pdbFlagged | ||||
|   global shuttingDown | ||||
|  | ||||
|   parser = argparse.ArgumentParser(description=scriptName, add_help=False, usage='{} <arguments>'.format(scriptName)) | ||||
|   parser.add_argument('-v', '--verbose', dest='debug', help="Verbose output", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   parser.add_argument('--extra-verbose', dest='verboseDebug', help="Super verbose output", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   parser.add_argument('--start-sleep', dest='startSleepSec', help="Sleep for this many seconds before starting", metavar='<seconds>', type=int, default=0, required=False) | ||||
|   parser.add_argument('--preserve', dest='preserveMode', help=f"File preservation mode (default: {PRESERVE_QUARANTINED})", metavar=f'[{PRESERVE_QUARANTINED}|{PRESERVE_ALL}|{PRESERVE_NONE}]', type=str, default=PRESERVE_QUARANTINED, required=False) | ||||
|   parser.add_argument('--zeek-log', dest='broSigLogSpec', help="Filespec to write Zeek signature log", metavar='<filespec>', type=str, required=False) | ||||
|   requiredNamed = parser.add_argument_group('required arguments') | ||||
|   requiredNamed.add_argument('-d', '--directory', dest='baseDir', help='Directory being monitored', metavar='<directory>', type=str, required=True) | ||||
|  | ||||
|   try: | ||||
|     parser.error = parser.exit | ||||
|     args = parser.parse_args() | ||||
|   except SystemExit: | ||||
|     parser.print_help() | ||||
|     exit(2) | ||||
|  | ||||
|   verboseDebug = args.verboseDebug | ||||
|   debug = args.debug or verboseDebug | ||||
|   if debug: | ||||
|     eprint(os.path.join(scriptPath, scriptName)) | ||||
|     eprint("{} arguments: {}".format(scriptName, sys.argv[1:])) | ||||
|     eprint("{} arguments: {}".format(scriptName, args)) | ||||
|   else: | ||||
|     sys.tracebacklimit = 0 | ||||
|  | ||||
|   # determine what to do with scanned files (preserve only "hits", preserve all, preserve none) | ||||
|   args.preserveMode = args.preserveMode.lower() | ||||
|   if (len(args.preserveMode) == 0): | ||||
|     args.preserveMode = PRESERVE_QUARANTINED | ||||
|   elif (args.preserveMode not in [PRESERVE_QUARANTINED, PRESERVE_ALL, PRESERVE_NONE]): | ||||
|     eprint(f'Invalid file preservation mode "{args.preserveMode}"') | ||||
|     sys.exit(1) | ||||
|  | ||||
|   # handle sigint and sigterm for graceful shutdown | ||||
|   signal.signal(signal.SIGINT, shutdown_handler) | ||||
|   signal.signal(signal.SIGTERM, shutdown_handler) | ||||
|   signal.signal(signal.SIGUSR1, pdb_handler) | ||||
|   signal.signal(signal.SIGUSR2, debug_toggle_handler) | ||||
|  | ||||
|   # sleep for a bit if requested | ||||
|   sleepCount = 0 | ||||
|   while (not shuttingDown) and (sleepCount < args.startSleepSec): | ||||
|     time.sleep(1) | ||||
|     sleepCount += 1 | ||||
|  | ||||
|   # where will the fake zeek log file be written to? | ||||
|   broSigLogSpec = args.broSigLogSpec | ||||
|   if broSigLogSpec is not None: | ||||
|     if os.path.isdir(broSigLogSpec): | ||||
|       # _carved tag will be recognized by 11_zeek_logs.conf in logstash | ||||
|       broSigLogSpec = os.path.join(broSigLogSpec, "signatures(_carved).log") | ||||
|     else: | ||||
|       # make sure path to write to zeek signatures log file exists before we start writing | ||||
|       pathlib.Path(os.path.dirname(os.path.realpath(broSigLogSpec))).mkdir(parents=True, exist_ok=True) | ||||
|  | ||||
|   # create quarantine/preserved directories for preserved files (see preserveMode) | ||||
|   quarantineDir = os.path.join(args.baseDir, PRESERVE_QUARANTINED_DIR_NAME) | ||||
|   preserveDir = os.path.join(args.baseDir, PRESERVE_PRESERVED_DIR_NAME) | ||||
|   if (args.preserveMode != PRESERVE_NONE) and (not os.path.isdir(quarantineDir)): | ||||
|     if debug: eprint(f'Creating "{quarantineDir}" for quarantined files') | ||||
|     pathlib.Path(quarantineDir).mkdir(parents=False, exist_ok=True) | ||||
|   if (args.preserveMode == PRESERVE_ALL) and (not os.path.isdir(preserveDir)): | ||||
|     if debug: eprint(f'Creating "{preserveDir}" for other preserved files') | ||||
|     pathlib.Path(preserveDir).mkdir(parents=False, exist_ok=True) | ||||
|  | ||||
|   # initialize ZeroMQ context and socket(s) to send messages to | ||||
|   context = zmq.Context() | ||||
|  | ||||
|   # Socket to receive scan results on | ||||
|   scanned_files_socket = context.socket(zmq.PULL) | ||||
|   scanned_files_socket.bind(f"tcp://*:{SINK_PORT}") | ||||
|   scanned_files_socket.SNDTIMEO = 5000 | ||||
|   scanned_files_socket.RCVTIMEO = 5000 | ||||
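|   # (each zeek_carve_scanner.py worker connects a PUSH socket to this port; a single PULL socket fair-queues results from all of them) | ||||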
|  | ||||
|   if debug: eprint(f"{scriptName}: bound sink port {SINK_PORT}") | ||||
|  | ||||
|   scanners = set() | ||||
|   fileScanCounts = defaultdict(AtomicInt) | ||||
|   fileScanHits = defaultdict(AtomicInt) | ||||
|  | ||||
|   # open the faux zeek signature.log file (if requested) and write out its header | ||||
|   with open(broSigLogSpec, 'w+', 1) if (broSigLogSpec is not None) else nullcontext() as broSigFile: | ||||
|     if (broSigFile is not None): | ||||
|       print('#separator \\x09', file=broSigFile, end='\n') | ||||
|       print('#set_separator\t,', file=broSigFile, end='\n') | ||||
|       print('#empty_field\t(empty)', file=broSigFile, end='\n') | ||||
|       print('#unset_field\t-', file=broSigFile, end='\n') | ||||
|       print('#path\tsignature', file=broSigFile, end='\n') | ||||
|       print(f'#open\t{datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}', file=broSigFile, end='\n') | ||||
|       print(re.sub(r"\b((orig|resp)_[hp])\b", r"id.\1", | ||||
|                    f"#fields\t{BroSignatureLine.signature_format_line()}".replace('{', '').replace('}', '')), | ||||
|             file=broSigFile, end='\n') | ||||
|       print(f'#types\t{BroSignatureLine.signature_types_line()}', file=broSigFile, end='\n') | ||||
|  | ||||
|     while (not shuttingDown): | ||||
|  | ||||
|       if pdbFlagged: | ||||
|         pdbFlagged = False | ||||
|         breakpoint() | ||||
|  | ||||
|       triggered = False | ||||
|       try: | ||||
|         scanResult = json.loads(scanned_files_socket.recv_string()) | ||||
|         if debug: eprint(f"{scriptName}:\t📨\t{scanResult}") | ||||
|       except zmq.Again as timeout: | ||||
|         scanResult = None | ||||
|         if verboseDebug: eprint(f"{scriptName}:\t🕑\t(recv)") | ||||
|  | ||||
|       if isinstance(scanResult, dict): | ||||
|  | ||||
|         # register/deregister scanners | ||||
|         if (FILE_SCAN_RESULT_SCANNER in scanResult): | ||||
|           scanner = scanResult[FILE_SCAN_RESULT_SCANNER].lower() | ||||
|           if scanner.startswith('-'): | ||||
|             if debug: eprint(f"{scriptName}:\t🙃\t{scanner[1:]}") | ||||
|             try: | ||||
|               scanners.remove(scanner[1:]) | ||||
|             except KeyError: | ||||
|               pass | ||||
|           else: | ||||
|             if debug and (scanner not in scanners): eprint(f"{scriptName}:\t🇷\t{scanner}") | ||||
|             scanners.add(scanner) | ||||
|  | ||||
|         # process scan results | ||||
|         if all (k in scanResult for k in (FILE_SCAN_RESULT_SCANNER, | ||||
|                                           FILE_SCAN_RESULT_FILE, | ||||
|                                           FILE_SCAN_RESULT_ENGINES, | ||||
|                                           FILE_SCAN_RESULT_HITS, | ||||
|                                           FILE_SCAN_RESULT_MESSAGE, | ||||
|                                           FILE_SCAN_RESULT_DESCRIPTION)): | ||||
|  | ||||
|           triggered = (scanResult[FILE_SCAN_RESULT_HITS] > 0) | ||||
|           fileName = scanResult[FILE_SCAN_RESULT_FILE] | ||||
|           fileNameBase = os.path.basename(fileName) | ||||
|  | ||||
|           # we won't delete or move/quarantine a file until fileScanCount >= len(scanners) | ||||
|           fileScanCount = fileScanCounts[fileNameBase].increment() | ||||
|  | ||||
|           if triggered: | ||||
|             # this file had a "hit" in one of the virus engines, log it! | ||||
|             fileScanHitCount = fileScanHits[fileNameBase].increment() | ||||
|  | ||||
|             # format the line as it should appear in the signatures log file | ||||
|             fileSpecFields = extracted_filespec_to_fields(fileName) | ||||
|             broLine = BroSignatureLine(ts=f"{fileSpecFields.time}", | ||||
|                                        uid=fileSpecFields.uid if fileSpecFields.uid is not None else '-', | ||||
|                                        note=ZEEK_SIGNATURE_NOTICE, | ||||
|                                        signature_id=scanResult[FILE_SCAN_RESULT_MESSAGE], | ||||
|                                        event_message=scanResult[FILE_SCAN_RESULT_DESCRIPTION], | ||||
|                                        sub_message=fileSpecFields.fid if fileSpecFields.fid is not None else os.path.basename(fileName), | ||||
|                                        signature_count=scanResult[FILE_SCAN_RESULT_HITS], | ||||
|                                        host_count=scanResult[FILE_SCAN_RESULT_ENGINES]) | ||||
|             broLineStr = str(broLine) | ||||
|  | ||||
|             # write broLineStr event line out to the signatures log file or to stdout | ||||
|             if (broSigFile is not None): | ||||
|               print(broLineStr, file=broSigFile, end='\n', flush=True) | ||||
|             else: | ||||
|               print(broLineStr, flush=True) | ||||
|  | ||||
|           else: | ||||
|             fileScanHitCount = fileScanHits[fileNameBase].value() | ||||
|  | ||||
|           # finally, what to do with the file itself | ||||
|           if os.path.isfile(fileName): | ||||
|  | ||||
|             # once all of the scanners have had their turn... | ||||
|             if (fileScanCount >= len(scanners)): | ||||
|               fileScanCounts.pop(fileNameBase, None) | ||||
|               fileScanHits.pop(fileNameBase, None) | ||||
|  | ||||
|               if (fileScanHitCount > 0) and (args.preserveMode != PRESERVE_NONE): | ||||
|  | ||||
|                 # move triggering file to quarantine | ||||
|                 if not same_file_or_dir(fileName, os.path.join(quarantineDir, fileNameBase)): # unless it's somehow already there | ||||
|  | ||||
|                   try: | ||||
|                     shutil.move(fileName, quarantineDir) | ||||
|                     if debug: eprint(f"{scriptName}:\t⏩\t{fileName} ({fileScanCount}/{len(scanners)})") | ||||
|                   except Exception as e: | ||||
|                     eprint(f"{scriptName}:\t❗\t🚫\t{fileName} move exception: {e}") | ||||
|                     # the move failed; fall back to deleting the file | ||||
|                     os.remove(fileName) | ||||
|  | ||||
|               else: | ||||
|                 if not same_file_or_dir(quarantineDir, os.path.dirname(fileName)): # don't move or delete if it's somehow already quarantined | ||||
|  | ||||
|                   if (args.preserveMode == PRESERVE_ALL): | ||||
|                     # move non-triggering file to preserved directory | ||||
|                     try: | ||||
|                       shutil.move(fileName, preserveDir) | ||||
|                       if verboseDebug: eprint(f"{scriptName}:\t⏩\t{fileName} ({fileScanCount}/{len(scanners)})") | ||||
|                     except Exception as e: | ||||
|                       eprint(f"{scriptName}:\t❗\t🚫\t{fileName} move exception: {e}") | ||||
|                       # the move failed; fall back to deleting the file | ||||
|                       os.remove(fileName) | ||||
|  | ||||
|                   else: | ||||
|                     # delete the file | ||||
|                     os.remove(fileName) | ||||
|                     if verboseDebug: eprint(f"{scriptName}:\t🚫\t{fileName} ({fileScanCount}/{len(scanners)})") | ||||
|  | ||||
|   # graceful shutdown | ||||
|   if debug: | ||||
|     eprint(f"{scriptName}: shutting down...") | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|   main() | ||||
312 Vagrant/resources/malcolm/shared/bin/zeek_carve_scanner.py (Executable file)
| @@ -0,0 +1,312 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| ################################################################################################### | ||||
| # Process queued files reported by zeek_carve_watcher.py, scanning them with the specified | ||||
| # virus scan engine and sending the results along to zeek_carve_logger.py | ||||
| # | ||||
| # Run the script with --help for options | ||||
| ################################################################################################### | ||||
|  | ||||
| import argparse | ||||
| import os | ||||
| import pathlib | ||||
| import json | ||||
| import signal | ||||
| import sys | ||||
| import threading | ||||
| import time | ||||
| import zmq | ||||
|  | ||||
| from zeek_carve_utils import * | ||||
| from multiprocessing.pool import ThreadPool | ||||
|  | ||||
| ################################################################################################### | ||||
| debug = False | ||||
| verboseDebug = False | ||||
| debugToggled = False | ||||
| pdbFlagged = False | ||||
| args = None | ||||
| scriptName = os.path.basename(__file__) | ||||
| scriptPath = os.path.dirname(os.path.realpath(__file__)) | ||||
| origPath = os.getcwd() | ||||
| shuttingDown = False | ||||
| scanWorkersCount = AtomicInt(value=0) | ||||
|  | ||||
| ################################################################################################### | ||||
| # handle sigint/sigterm and set a global shutdown variable | ||||
| def shutdown_handler(signum, frame): | ||||
|   global shuttingDown | ||||
|   shuttingDown = True | ||||
|  | ||||
| ################################################################################################### | ||||
| # handle sigusr1 for a pdb breakpoint | ||||
| def pdb_handler(sig, frame): | ||||
|   global pdbFlagged | ||||
|   pdbFlagged = True | ||||
|  | ||||
| ################################################################################################### | ||||
| # handle sigusr2 for toggling debug | ||||
| def debug_toggle_handler(signum, frame): | ||||
|   global debug | ||||
|   global debugToggled | ||||
|   debug = not debug | ||||
|   debugToggled = True | ||||
|  | ||||
| ################################################################################################### | ||||
| # look for a file to scan (probably in its original directory, but possibly already moved to quarantine) | ||||
| def locate_file(fileInfo): | ||||
|   global verboseDebug | ||||
|  | ||||
|   if isinstance(fileInfo, dict) and (FILE_SCAN_RESULT_FILE in fileInfo): | ||||
|     fileName = fileInfo[FILE_SCAN_RESULT_FILE] | ||||
|   elif isinstance(fileInfo, str): | ||||
|     fileName = fileInfo | ||||
|   else: | ||||
|     fileName = None | ||||
|  | ||||
|   if fileName is not None: | ||||
|  | ||||
|     if os.path.isfile(fileName): | ||||
|       return fileName | ||||
|  | ||||
|     else: | ||||
|       for testPath in [PRESERVE_QUARANTINED_DIR_NAME, PRESERVE_PRESERVED_DIR_NAME]: | ||||
|         testFileName = os.path.join(os.path.join(os.path.dirname(os.path.realpath(fileName)), testPath), os.path.basename(fileName)) | ||||
|         if os.path.isfile(testFileName): | ||||
|           if verboseDebug: eprint(f"{scriptName}:\t⏩\t{testFileName}") | ||||
|           return testFileName | ||||
|  | ||||
|   return None | ||||
|  | ||||
|  | ||||
| ################################################################################################### | ||||
| def scanFileWorker(checkConnInfo, carvedFileSub): | ||||
|   global debug | ||||
|   global verboseDebug | ||||
|   global shuttingDown | ||||
|   global scanWorkersCount | ||||
|  | ||||
|   scanWorkerId = scanWorkersCount.increment() # unique ID for this thread | ||||
|   scannerRegistered = False | ||||
|  | ||||
|   if debug: eprint(f"{scriptName}[{scanWorkerId}]:\tstarted") | ||||
|  | ||||
|   try: | ||||
|     if isinstance(checkConnInfo, FileScanProvider): | ||||
|  | ||||
|       # initialize ZeroMQ context and socket(s) to send scan results | ||||
|       context = zmq.Context() | ||||
|  | ||||
|       # Socket to send messages to | ||||
|       scanned_files_socket = context.socket(zmq.PUSH) | ||||
|       scanned_files_socket.connect(f"tcp://localhost:{SINK_PORT}") | ||||
|       # todo: do I want to set this? probably not, since what else would we do if we can't send? just block | ||||
|       # scanned_files_socket.SNDTIMEO = 5000 | ||||
|       if debug: eprint(f"{scriptName}[{scanWorkerId}]:\tconnected to sink at {SINK_PORT}") | ||||
|  | ||||
|       fileInfo = None | ||||
|       fileName = None | ||||
|       retrySubmitFile = False # todo: maximum file retry count? | ||||
|  | ||||
|       # loop forever, or until we're told to shut down | ||||
|       while not shuttingDown: | ||||
|  | ||||
|         # "register" this scanner with the logger | ||||
|         while (not scannerRegistered) and (not shuttingDown): | ||||
|           try: | ||||
|             scanned_files_socket.send_string(json.dumps({FILE_SCAN_RESULT_SCANNER : checkConnInfo.scanner_name()})) | ||||
|             scannerRegistered = True | ||||
|             if debug: eprint(f"{scriptName}[{scanWorkerId}]:\t🇷\t{checkConnInfo.scanner_name()}") | ||||
|  | ||||
|           except zmq.Again as timeout: | ||||
|             # todo: what to do here? | ||||
|             if verboseDebug: eprint(f"{scriptName}[{scanWorkerId}]:\t🕑\t{checkConnInfo.scanner_name()} 🇷") | ||||
|  | ||||
|         if shuttingDown: | ||||
|           break | ||||
|  | ||||
|         if retrySubmitFile and (fileInfo is not None) and (locate_file(fileInfo) is not None): | ||||
|           # we were unable to submit the file for processing, so try again | ||||
|           time.sleep(1) | ||||
|           if debug: eprint(f"{scriptName}[{scanWorkerId}]:\t🔃\t{json.dumps(fileInfo)}") | ||||
|  | ||||
|         else: | ||||
|           retrySubmitFile = False | ||||
|           # read watched file information from the subscription | ||||
|           fileInfo = carvedFileSub.Pull(scanWorkerId=scanWorkerId) | ||||
|  | ||||
|         fileName = locate_file(fileInfo) | ||||
|         if (fileName is not None) and os.path.isfile(fileName): | ||||
|  | ||||
|           # file exists, submit for scanning | ||||
|           if debug: eprint(f"{scriptName}[{scanWorkerId}]:\t🔎\t{json.dumps(fileInfo)}") | ||||
|           requestComplete = False | ||||
|           scanResult = None | ||||
|           fileSize = int(fileInfo[FILE_SCAN_RESULT_FILE_SIZE]) if isinstance(fileInfo[FILE_SCAN_RESULT_FILE_SIZE], int) or (isinstance(fileInfo[FILE_SCAN_RESULT_FILE_SIZE], str) and fileInfo[FILE_SCAN_RESULT_FILE_SIZE].isdecimal()) else None | ||||
|           scan = AnalyzerScan(provider=checkConnInfo, name=fileName, | ||||
|                               size=fileSize, | ||||
|                               fileType=fileInfo[FILE_SCAN_RESULT_FILE_TYPE], | ||||
|                               submissionResponse=checkConnInfo.submit(fileName=fileName, fileSize=fileSize, fileType=fileInfo[FILE_SCAN_RESULT_FILE_TYPE], block=False)) | ||||
|           if scan.submissionResponse is not None: | ||||
|             if debug: eprint(f"{scriptName}[{scanWorkerId}]:\t🔍\t{fileName}") | ||||
|  | ||||
|             # file was successfully submitted and is now being scanned | ||||
|             retrySubmitFile = False | ||||
|             requestComplete = False | ||||
|  | ||||
|             # todo: maximum time we wait for a single file to be scanned? | ||||
|             while (not requestComplete) and (not shuttingDown): | ||||
|  | ||||
|               # wait a moment then check to see if the scan is complete | ||||
|               time.sleep(scan.provider.check_interval()) | ||||
|               response = scan.provider.check_result(scan.submissionResponse) | ||||
|  | ||||
|               if isinstance(response, AnalyzerResult): | ||||
|  | ||||
|                 # whether the scan has completed | ||||
|                 requestComplete = response.finished | ||||
|  | ||||
|                 if response.success: | ||||
|                   # successful scan, report the scan results | ||||
|                   scanResult = response | ||||
|  | ||||
|                 elif isinstance(response.result, dict) and ("error" in response.result): | ||||
|                   # scan errored out, report the error | ||||
|                   scanResult = response.result["error"] | ||||
|                   eprint(f"{scriptName}[{scanWorkerId}]:\t❗\t{fileName} {scanResult}") | ||||
|  | ||||
|                 else: | ||||
|                   # result is unrecognizable | ||||
|                   scanResult = "Invalid scan result format" | ||||
|                   eprint(f"{scriptName}[{scanWorkerId}]:\t❗\t{fileName} {scanResult}") | ||||
|  | ||||
|               else: | ||||
|                 # unexpected response type from the scan provider; give up on this file | ||||
|                 # todo: is there anything better to do here? retry it? | ||||
|                 requestComplete = True | ||||
|                 scanResult = "Error checking results" | ||||
|                 eprint(f"{scriptName}[{scanWorkerId}]:\t❗\t{fileName} {scanResult}") | ||||
|  | ||||
|           else: | ||||
|             # we were denied (rate limiting, probably), so we'll need to wait for a slot to clear up | ||||
|             retrySubmitFile = True | ||||
|  | ||||
|           if requestComplete and (scanResult is not None): | ||||
|             try: | ||||
|               # Send results to sink | ||||
|               scanned_files_socket.send_string(json.dumps(scan.provider.format(fileName, scanResult))) | ||||
|               if debug: eprint(f"{scriptName}[{scanWorkerId}]:\t✅\t{fileName}") | ||||
|  | ||||
|             except zmq.Again as timeout: | ||||
|               # todo: what to do here? | ||||
|               if verboseDebug: eprint(f"{scriptName}[{scanWorkerId}]:\t🕑\t{fileName}") | ||||
|  | ||||
|     else: | ||||
|       eprint(f"{scriptName}[{scanWorkerId}]:\tinvalid scanner provider specified") | ||||
|  | ||||
|   finally: | ||||
|     # "unregister" this scanner with the logger | ||||
|     if scannerRegistered: | ||||
|       try: | ||||
|         scanned_files_socket.send_string(json.dumps({FILE_SCAN_RESULT_SCANNER : f"-{checkConnInfo.scanner_name()}"})) | ||||
|         scannerRegistered = False | ||||
|         if debug: eprint(f"{scriptName}[{scanWorkerId}]:\t🙃\t{checkConnInfo.scanner_name()}") | ||||
|       except zmq.Again as timeout: | ||||
|         # todo: what to do here? | ||||
|         if verboseDebug: eprint(f"{scriptName}[{scanWorkerId}]:\t🕑\t{checkConnInfo.scanner_name()} 🙃") | ||||
|  | ||||
|   if debug: eprint(f"{scriptName}[{scanWorkerId}]:\tfinished") | ||||
|  | ||||
| ################################################################################################### | ||||
| # main | ||||
| def main(): | ||||
|   global args | ||||
|   global debug | ||||
|   global debugToggled | ||||
|   global pdbFlagged | ||||
|   global shuttingDown | ||||
|   global verboseDebug | ||||
|  | ||||
|   parser = argparse.ArgumentParser(description=scriptName, add_help=False, usage='{} <arguments>'.format(scriptName)) | ||||
|   parser.add_argument('-v', '--verbose', dest='debug', help="Verbose output", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   parser.add_argument('--extra-verbose', dest='verboseDebug', help="Super verbose output", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   parser.add_argument('--start-sleep', dest='startSleepSec', help="Sleep for this many seconds before starting", metavar='<seconds>', type=int, default=0, required=False) | ||||
|   parser.add_argument('--req-limit', dest='reqLimit', help="Requests limit", metavar='<requests>', type=int, default=None, required=False) | ||||
|   parser.add_argument('--malass-host', dest='malassHost', help="Malass host or IP address", metavar='<host>', type=str, required=False) | ||||
|   parser.add_argument('--malass-port', dest='malassPort', help="Malass web interface port", metavar='<port>', type=int, default=80, required=False) | ||||
|   parser.add_argument('--vtot-api', dest='vtotApi', help="VirusTotal API key", metavar='<API key>', type=str, required=False) | ||||
|   parser.add_argument('--clamav', dest='enableClamAv', metavar='true|false', help="Enable ClamAV", type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   parser.add_argument('--clamav-socket', dest='clamAvSocket', help="ClamAV socket filename", metavar='<filespec>', type=str, required=False, default=None) | ||||
|   parser.add_argument('--yara', dest='enableYara', metavar='true|false', help="Enable Yara", type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   parser.add_argument('--yara-custom-only', dest='yaraCustomOnly', metavar='true|false', help="Ignore default Yara rules", type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   parser.add_argument('--capa', dest='enableCapa', metavar='true|false', help="Enable Capa", type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   parser.add_argument('--capa-rules', dest='capaRulesDir', help="Capa Rules Directory", metavar='<pathspec>', type=str, required=False) | ||||
|   parser.add_argument('--capa-verbose', dest='capaVerbose', metavar='true|false', help="Log all capa rules, not just MITRE ATT&CK technique classifications", type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|  | ||||
|   try: | ||||
|     parser.error = parser.exit | ||||
|     args = parser.parse_args() | ||||
|   except SystemExit: | ||||
|     parser.print_help() | ||||
|     exit(2) | ||||
|  | ||||
|   verboseDebug = args.verboseDebug | ||||
|   debug = args.debug or verboseDebug | ||||
|   if debug: | ||||
|     eprint(os.path.join(scriptPath, scriptName)) | ||||
|     eprint("{} arguments: {}".format(scriptName, sys.argv[1:])) | ||||
|     eprint("{} arguments: {}".format(scriptName, args)) | ||||
|   else: | ||||
|     sys.tracebacklimit = 0 | ||||
|  | ||||
|   # handle sigint and sigterm for graceful shutdown | ||||
|   signal.signal(signal.SIGINT, shutdown_handler) | ||||
|   signal.signal(signal.SIGTERM, shutdown_handler) | ||||
|   signal.signal(signal.SIGUSR1, pdb_handler) | ||||
|   signal.signal(signal.SIGUSR2, debug_toggle_handler) | ||||
|  | ||||
|   # sleep for a bit if requested | ||||
|   sleepCount = 0 | ||||
|   while (not shuttingDown) and (sleepCount < args.startSleepSec): | ||||
|     time.sleep(1) | ||||
|     sleepCount += 1 | ||||
|  | ||||
|   # initialize objects for virus scanning engines | ||||
|   if (isinstance(args.malassHost, str) and (len(args.malassHost) > 1)): | ||||
|     checkConnInfo = MalassScan(args.malassHost, args.malassPort, reqLimit=args.reqLimit) | ||||
|   elif (isinstance(args.vtotApi, str) and (len(args.vtotApi) > 1)): | ||||
|     checkConnInfo = VirusTotalSearch(args.vtotApi, reqLimit=args.reqLimit) | ||||
|   elif args.enableYara: | ||||
|     yaraDirs = [] | ||||
|     if (not args.yaraCustomOnly): | ||||
|       yaraDirs.append(YARA_RULES_DIR) | ||||
|     yaraDirs.append(YARA_CUSTOM_RULES_DIR) | ||||
|     checkConnInfo = YaraScan(debug=debug, verboseDebug=verboseDebug, rulesDirs=yaraDirs, reqLimit=args.reqLimit) | ||||
|   elif args.enableCapa: | ||||
|     checkConnInfo = CapaScan(debug=debug, verboseDebug=verboseDebug, rulesDir=args.capaRulesDir, verboseHits=args.capaVerbose, reqLimit=args.reqLimit) | ||||
|   else: | ||||
|     if not args.enableClamAv: | ||||
|       eprint('No scanner specified, defaulting to ClamAV') | ||||
|     checkConnInfo = ClamAVScan(debug=debug, verboseDebug=verboseDebug, socketFileName=args.clamAvSocket, reqLimit=args.reqLimit) | ||||
|  | ||||
|   carvedFileSub = CarvedFileSubscriberThreaded(debug=debug, verboseDebug=verboseDebug, | ||||
|                                                host='localhost', port=VENTILATOR_PORT, | ||||
|                                                scriptName=scriptName) | ||||
|  | ||||
|   # start scanner threads which will pull filenames to be scanned and send the results to the logger | ||||
|   scannerThreads = ThreadPool(checkConnInfo.max_requests(), scanFileWorker, ([checkConnInfo, carvedFileSub])) | ||||
|   while (not shuttingDown): | ||||
|     if pdbFlagged: | ||||
|       pdbFlagged = False | ||||
|       breakpoint() | ||||
|     time.sleep(0.2) | ||||
|  | ||||
|   # graceful shutdown | ||||
|   if debug: eprint(f"{scriptName}: shutting down...") | ||||
|   time.sleep(5) | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|   main() | ||||
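
A note on the plumbing (not part of this commit): the scanner PUSHes results to a sink
that binds SINK_PORT, the role zeek_carve_logger.py fills. A minimal sketch of that
receiving side, assuming only the constants and conventions visible above (a scanner
registers by sending a JSON record with its name, and unregisters with the name
prefixed by "-"):

import json
import zmq

from zeek_carve_utils import SINK_PORT  # the same constant the scanner connects to

context = zmq.Context()
sink = context.socket(zmq.PULL)    # paired with the scanner's PUSH socket
sink.bind(f"tcp://*:{SINK_PORT}")  # scanners connect to tcp://localhost:SINK_PORT

while True:
    # each message is a JSON document: either a scanner (un)registration
    # or a scan result formatted by the provider's format() method
    message = json.loads(sink.recv_string())
    print(message)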
1094  Vagrant/resources/malcolm/shared/bin/zeek_carve_utils.py  Normal file
(File diff suppressed because it is too large)

236  Vagrant/resources/malcolm/shared/bin/zeek_carve_watcher.py  Executable file
							| @@ -0,0 +1,236 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| ################################################################################################### | ||||
| # Monitor a directory for files extracted by zeek for processing | ||||
| # | ||||
| # Run the script with --help for options | ||||
| ################################################################################################### | ||||
|  | ||||
| import argparse | ||||
| import copy | ||||
| import glob | ||||
| import json | ||||
| import magic | ||||
| import os | ||||
| import pathlib | ||||
| import pyinotify | ||||
| import signal | ||||
| import sys | ||||
| import time | ||||
| import zmq | ||||
|  | ||||
| from zeek_carve_utils import * | ||||
|  | ||||
| ################################################################################################### | ||||
| MINIMUM_CHECKED_FILE_SIZE_DEFAULT = 64 | ||||
| MAXIMUM_CHECKED_FILE_SIZE_DEFAULT = 134217728 | ||||
|  | ||||
| ################################################################################################### | ||||
| debug = False | ||||
| verboseDebug = False | ||||
| debugToggled = False | ||||
| pdbFlagged = False | ||||
| args = None | ||||
| scriptName = os.path.basename(__file__) | ||||
| scriptPath = os.path.dirname(os.path.realpath(__file__)) | ||||
| origPath = os.getcwd() | ||||
| shuttingDown = False | ||||
|  | ||||
| ################################################################################################### | ||||
| # watch files written to and moved to this directory | ||||
| class EventWatcher(pyinotify.ProcessEvent): | ||||
|  | ||||
|   # notify on files written in-place then closed (IN_CLOSE_WRITE), and moved into this directory (IN_MOVED_TO) | ||||
|   _methods = ["IN_CLOSE_WRITE", "IN_MOVED_TO"] | ||||
|  | ||||
|   def __init__(self): | ||||
|     global debug | ||||
|  | ||||
|     super().__init__() | ||||
|  | ||||
|     # initialize ZeroMQ context and socket(s) to send messages to | ||||
|     self.context = zmq.Context() | ||||
|  | ||||
|     # Socket to send messages on | ||||
|     if debug: eprint(f"{scriptName}:\tbinding ventilator port {VENTILATOR_PORT}") | ||||
|     self.ventilator_socket = self.context.socket(zmq.PUB) | ||||
|     self.ventilator_socket.bind(f"tcp://*:{VENTILATOR_PORT}") | ||||
|  | ||||
|     # todo: do I want to set this? probably not since this guy's whole job is to send | ||||
|     # and if he can't then what's the point? just block | ||||
|     # self.ventilator_socket.SNDTIMEO = 5000 | ||||
|  | ||||
|     if debug: eprint(f"{scriptName}:\tEventWatcher initialized") | ||||
|  | ||||
| ################################################################################################### | ||||
| # generate a process_* method for each watched inotify event and attach it to the EventWatcher class | ||||
| def event_process_generator(cls, method): | ||||
|  | ||||
|   # actual method called when we are notified of a file | ||||
|   def _method_name(self, event): | ||||
|  | ||||
|     global args | ||||
|     global debug | ||||
|     global verboseDebug | ||||
|  | ||||
|     if debug: eprint(f"{scriptName}:\t👓\t{event.pathname}") | ||||
|  | ||||
|     if (not event.dir) and os.path.isfile(event.pathname): | ||||
|  | ||||
|       fileSize = os.path.getsize(event.pathname) | ||||
|       if (args.minBytes <= fileSize <= args.maxBytes): | ||||
|  | ||||
|         fileType = magic.from_file(event.pathname, mime=True) | ||||
|         if (pathlib.Path(event.pathname).suffix != CAPA_VIV_SUFFIX) and (fileType != CAPA_VIV_MIME): | ||||
|           # the entity is a right-sized file, is not a capa .viv cache file, and it exists, so send it to get scanned | ||||
|  | ||||
|           fileInfo = json.dumps({ FILE_SCAN_RESULT_FILE : event.pathname, | ||||
|                                   FILE_SCAN_RESULT_FILE_SIZE : fileSize, | ||||
|                                   FILE_SCAN_RESULT_FILE_TYPE : fileType }) | ||||
|           if debug: eprint(f"{scriptName}:\t📩\t{fileInfo}") | ||||
|           try: | ||||
|             self.ventilator_socket.send_string(fileInfo) | ||||
|             if debug: eprint(f"{scriptName}:\t📫\t{event.pathname}") | ||||
|           except zmq.Again as timeout: | ||||
|             if verboseDebug: eprint(f"{scriptName}:\t🕑\t{event.pathname}") | ||||
|  | ||||
|         else: | ||||
|           # temporary capa .viv file, just ignore it as it will get cleaned up by the scanner when it's done | ||||
|           if debug: eprint(f"{scriptName}:\t🚧\t{event.pathname}") | ||||
|  | ||||
|       else: | ||||
|         # too small/big to care about, delete it | ||||
|         os.remove(event.pathname) | ||||
|         if debug: eprint(f"{scriptName}:\t🚫\t{event.pathname}") | ||||
|  | ||||
|   # assign process method to class | ||||
|   _method_name.__name__ = "process_{}".format(method) | ||||
|   setattr(cls, _method_name.__name__, _method_name) | ||||
|  | ||||
| ################################################################################################### | ||||
| # handle sigint/sigterm and set a global shutdown variable | ||||
| def shutdown_handler(signum, frame): | ||||
|   global shuttingDown | ||||
|   shuttingDown = True | ||||
|  | ||||
| ################################################################################################### | ||||
| # handle sigusr1 for a pdb breakpoint | ||||
| def pdb_handler(sig, frame): | ||||
|   global pdbFlagged | ||||
|   pdbFlagged = True | ||||
|  | ||||
| ################################################################################################### | ||||
| # handle sigusr2 for toggling debug | ||||
| def debug_toggle_handler(signum, frame): | ||||
|   global debug | ||||
|   global debugToggled | ||||
|   debug = not debug | ||||
|   debugToggled = True | ||||
|  | ||||
| ################################################################################################### | ||||
| # main | ||||
| def main(): | ||||
|   global args | ||||
|   global debug | ||||
|   global verboseDebug | ||||
|   global debugToggled | ||||
|   global pdbFlagged | ||||
|   global shuttingDown | ||||
|  | ||||
|   parser = argparse.ArgumentParser(description=scriptName, add_help=False, usage='{} <arguments>'.format(scriptName)) | ||||
|   parser.add_argument('-v', '--verbose', dest='debug', help="Verbose output", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   parser.add_argument('--extra-verbose', dest='verboseDebug', help="Super verbose output", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   parser.add_argument('--ignore-existing', dest='ignoreExisting', help="Ignore preexisting files in the monitor directory", metavar='true|false', type=str2bool, nargs='?', const=True, default=False, required=False) | ||||
|   parser.add_argument('--start-sleep', dest='startSleepSec', help="Sleep for this many seconds before starting", metavar='<seconds>', type=int, default=0, required=False) | ||||
|   parser.add_argument('-r', '--recursive-directory', dest='recursiveDir', help="If specified, monitor all directories with this name underneath --directory", metavar='<name>', type=str, required=False) | ||||
|   parser.add_argument('--min-bytes', dest='minBytes', help="Minimum size for checked files", metavar='<bytes>', type=int, default=MINIMUM_CHECKED_FILE_SIZE_DEFAULT, required=False) | ||||
|   parser.add_argument('--max-bytes', dest='maxBytes', help="Maximum size for checked files", metavar='<bytes>', type=int, default=MAXIMUM_CHECKED_FILE_SIZE_DEFAULT, required=False) | ||||
|   requiredNamed = parser.add_argument_group('required arguments') | ||||
|   requiredNamed.add_argument('-d', '--directory', dest='baseDir', help='Directory to monitor', metavar='<directory>', type=str, required=True) | ||||
|  | ||||
|   try: | ||||
|     parser.error = parser.exit | ||||
|     args = parser.parse_args() | ||||
|   except SystemExit: | ||||
|     parser.print_help() | ||||
|     exit(2) | ||||
|  | ||||
|   verboseDebug = args.verboseDebug | ||||
|   debug = args.debug or verboseDebug | ||||
|   if debug: | ||||
|     eprint(os.path.join(scriptPath, scriptName)) | ||||
|     eprint("{} arguments: {}".format(scriptName, sys.argv[1:])) | ||||
|     eprint("{} arguments: {}".format(scriptName, args)) | ||||
|   else: | ||||
|     sys.tracebacklimit = 0 | ||||
|  | ||||
|   # handle sigint and sigterm for graceful shutdown | ||||
|   signal.signal(signal.SIGINT, shutdown_handler) | ||||
|   signal.signal(signal.SIGTERM, shutdown_handler) | ||||
|   signal.signal(signal.SIGUSR1, pdb_handler) | ||||
|   signal.signal(signal.SIGUSR2, debug_toggle_handler) | ||||
|  | ||||
|   # sleep for a bit if requested | ||||
|   sleepCount = 0 | ||||
|   while (not shuttingDown) and (sleepCount < args.startSleepSec): | ||||
|     time.sleep(1) | ||||
|     sleepCount += 1 | ||||
|  | ||||
|   # add events to watch to EventWatcher class | ||||
|   for method in EventWatcher._methods: | ||||
|     event_process_generator(EventWatcher, method) | ||||
|  | ||||
|   # if directory to monitor doesn't exist, create it now | ||||
|   if os.path.isdir(args.baseDir): | ||||
|     preexistingDir = True | ||||
|   else: | ||||
|     preexistingDir = False | ||||
|     if debug: eprint(f'{scriptName}: creating "{args.baseDir}" to monitor') | ||||
|     pathlib.Path(args.baseDir).mkdir(parents=False, exist_ok=True) | ||||
|  | ||||
|   # if recursion was requested, get list of directories to monitor (keep checking until at least one exists) | ||||
|   watchDirs = [] | ||||
|   while (len(watchDirs) == 0): | ||||
|     if args.recursiveDir is None: | ||||
|       watchDirs = [args.baseDir] | ||||
|     else: | ||||
|       watchDirs = glob.glob(f'{args.baseDir}/**/{args.recursiveDir}', recursive=True) | ||||
|       if (len(watchDirs) == 0): time.sleep(1) | ||||
|  | ||||
|   # begin threaded watch of path(s) | ||||
|   time.sleep(1) | ||||
|   watch_manager = pyinotify.WatchManager() | ||||
|   event_notifier = pyinotify.ThreadedNotifier(watch_manager, EventWatcher()) | ||||
|   for watchDir in watchDirs: | ||||
|     watch_manager.add_watch(os.path.abspath(watchDir), pyinotify.ALL_EVENTS) | ||||
|   if debug: eprint(f"{scriptName}: monitoring {watchDirs}") | ||||
|   time.sleep(2) | ||||
|   event_notifier.start() | ||||
|  | ||||
|   # if the monitored directory preexisted and --ignore-existing wasn't specified, "touch" existing files so they generate events to process | ||||
|   if preexistingDir and (not args.ignoreExisting): | ||||
|     filesTouched = 0 | ||||
|     for watchDir in watchDirs: | ||||
|       for preexistingFile in [str(x) for x in pathlib.Path(watchDir).iterdir() if x.is_file()]:  # iterdir() already includes the directory prefix | ||||
|         touch(preexistingFile) | ||||
|         filesTouched += 1 | ||||
|     if debug and (filesTouched > 0): | ||||
|       eprint(f"{scriptName}: found {filesTouched} preexisting files to check") | ||||
|  | ||||
|   # loop forever, or until we're told to shut down, whichever comes first | ||||
|   while (not shuttingDown): | ||||
|     if pdbFlagged: | ||||
|       pdbFlagged = False | ||||
|       breakpoint() | ||||
|     time.sleep(0.2) | ||||
|  | ||||
|   # graceful shutdown | ||||
|   if debug: eprint(f"{scriptName}: shutting down...") | ||||
|   event_notifier.stop() | ||||
|   time.sleep(1) | ||||
|  | ||||
|   if debug: eprint(f"{scriptName}: finished monitoring {watchDirs}") | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|   main() | ||||
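
Likewise (not part of this commit), the watcher PUBlishes carved-file records on
VENTILATOR_PORT, and CarvedFileSubscriberThreaded from zeek_carve_utils.py is the
subscribing side used by the scanner. A minimal stand-in sketch, assuming only what
is visible above:

import json
import zmq

from zeek_carve_utils import VENTILATOR_PORT  # the same constant the watcher binds

context = zmq.Context()
sub = context.socket(zmq.SUB)  # paired with the watcher's PUB socket
sub.connect(f"tcp://localhost:{VENTILATOR_PORT}")
sub.setsockopt_string(zmq.SUBSCRIBE, "")  # no topic filtering

while True:
    # each record carries the carved file's name, size, and MIME type
    fileInfo = json.loads(sub.recv_string())
    print(fileInfo)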
182  Vagrant/resources/malcolm/shared/bin/zeek_carved_http_server.py  Executable file
							| @@ -0,0 +1,182 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # -*- coding: utf-8 -*- | ||||
|  | ||||
| # Multithreaded simple HTTP directory server. | ||||
| # | ||||
| # The files can optionally be aes-256-cbc encrypted in a way that's compatible with: | ||||
| #   openssl enc -aes-256-cbc -d -in encrypted.data -out decrypted.data | ||||
|  | ||||
| import argparse | ||||
| import hashlib | ||||
| import os | ||||
| import sys | ||||
| from threading import Thread | ||||
| from socketserver import ThreadingMixIn | ||||
| from http.server import HTTPServer, SimpleHTTPRequestHandler | ||||
| from Crypto.Cipher import AES | ||||
|  | ||||
| KEY_SIZE = 32 | ||||
| OPENSSL_ENC_MAGIC = b'Salted__' | ||||
| PKCS5_SALT_LEN = 8 | ||||
|  | ||||
| ################################################################################################### | ||||
| args = None | ||||
| debug = False | ||||
| script_name = os.path.basename(__file__) | ||||
| script_path = os.path.dirname(os.path.realpath(__file__)) | ||||
| orig_path = os.getcwd() | ||||
|  | ||||
| ################################################################################################### | ||||
| # print to stderr | ||||
| def eprint(*args, **kwargs): | ||||
|   print(*args, file=sys.stderr, **kwargs) | ||||
|   sys.stderr.flush() | ||||
|  | ||||
| ################################################################################################### | ||||
| # convenient boolean argument parsing | ||||
| def str2bool(v): | ||||
|   if v.lower() in ('yes', 'true', 't', 'y', '1'): | ||||
|     return True | ||||
|   elif v.lower() in ('no', 'false', 'f', 'n', '0'): | ||||
|     return False | ||||
|   else: | ||||
|     raise argparse.ArgumentTypeError('Boolean value expected.') | ||||
|  | ||||
| ################################################################################################### | ||||
| # EVP_BytesToKey | ||||
| # | ||||
| # reference: https://github.com/openssl/openssl/blob/6f0ac0e2f27d9240516edb9a23b7863e7ad02898/crypto/evp/evp_key.c#L74 | ||||
| #            https://gist.github.com/chrono-meter/d122cbefc6f6248a0af554995f072460 | ||||
| def EVP_BytesToKey(key_length: int, iv_length: int, md, salt: bytes, data: bytes, count: int=1) -> (bytes, bytes): | ||||
|   assert data | ||||
|   assert salt == b'' or len(salt) == PKCS5_SALT_LEN | ||||
|  | ||||
|   md_buf = b'' | ||||
|   key = b'' | ||||
|   iv = b'' | ||||
|   addmd = 0 | ||||
|  | ||||
|   while key_length > len(key) or iv_length > len(iv): | ||||
|     c = md() | ||||
|     if addmd: | ||||
|       c.update(md_buf) | ||||
|     addmd += 1 | ||||
|     c.update(data) | ||||
|     c.update(salt) | ||||
|     md_buf = c.digest() | ||||
|     for i in range(1, count): | ||||
|       md_buf = md(md_buf).digest()  # re-hash the digest bytes on each extra iteration | ||||
|  | ||||
|     md_buf2 = md_buf | ||||
|  | ||||
|     if key_length > len(key): | ||||
|       key, md_buf2 = key + md_buf2[:key_length - len(key)], md_buf2[key_length - len(key):] | ||||
|  | ||||
|     if iv_length > len(iv): | ||||
|       iv = iv + md_buf2[:iv_length - len(iv)] | ||||
|  | ||||
|   return key, iv | ||||
|  | ||||
| ################################################################################################### | ||||
| # | ||||
| class HTTPHandler(SimpleHTTPRequestHandler): | ||||
|  | ||||
|   # return full path based on server base path and requested path | ||||
|   def translate_path(self, path): | ||||
|     path = SimpleHTTPRequestHandler.translate_path(self, path) | ||||
|     relpath = os.path.relpath(path, os.getcwd()) | ||||
|     fullpath = os.path.join(self.server.base_path, relpath) | ||||
|     return fullpath | ||||
|  | ||||
|   # override do_GET so that files are encrypted, if requested | ||||
|   def do_GET(self): | ||||
|     global debug | ||||
|     global args | ||||
|  | ||||
|     fullpath = self.translate_path(self.path) | ||||
|  | ||||
|     if (not args.encrypt) or os.path.isdir(fullpath): | ||||
|       # unencrypted, just use default implementation | ||||
|       SimpleHTTPRequestHandler.do_GET(self) | ||||
|  | ||||
|     else: | ||||
|       # encrypt file transfers | ||||
|       if os.path.isfile(fullpath) or os.path.islink(fullpath): | ||||
|         self.send_response(200) | ||||
|         self.send_header('Content-type', 'application/octet-stream') | ||||
|         self.send_header('Content-Disposition', f'attachment; filename={os.path.basename(fullpath)}.encrypted') | ||||
|         self.end_headers() | ||||
|         salt = os.urandom(PKCS5_SALT_LEN) | ||||
|         key, iv = EVP_BytesToKey(KEY_SIZE, AES.block_size, hashlib.sha256, salt, args.key.encode('utf-8')) | ||||
|         cipher = AES.new(key, AES.MODE_CBC, iv) | ||||
|         encrypted = b"" | ||||
|         encrypted += OPENSSL_ENC_MAGIC | ||||
|         encrypted += salt | ||||
|         self.wfile.write(encrypted) | ||||
|         with open(fullpath, 'rb') as f: | ||||
|           padding = b'' | ||||
|           while True: | ||||
|             chunk = f.read(cipher.block_size) | ||||
|             if len(chunk) < cipher.block_size: | ||||
|               remaining = cipher.block_size - len(chunk) | ||||
|               padding = bytes([remaining] * remaining) | ||||
|             self.wfile.write(cipher.encrypt(chunk + padding)) | ||||
|             if padding: | ||||
|               break | ||||
|  | ||||
|       else: | ||||
|         self.send_error(404, "Not Found") | ||||
|  | ||||
| ################################################################################################### | ||||
| # | ||||
| class ThreadingHTTPServer(ThreadingMixIn, HTTPServer): | ||||
|   def __init__(self, base_path, server_address, RequestHandlerClass=HTTPHandler): | ||||
|     self.base_path = base_path | ||||
|     HTTPServer.__init__(self, server_address, RequestHandlerClass) | ||||
|  | ||||
| ################################################################################################### | ||||
| # | ||||
| def serve_on_port(path : str, port : int): | ||||
|   server = ThreadingHTTPServer(path, ("", port)) | ||||
|   print(f"serving {path} at port {port}") | ||||
|   server.serve_forever() | ||||
|  | ||||
| ################################################################################################### | ||||
| # main | ||||
| def main(): | ||||
|   global args | ||||
|   global debug | ||||
|   global orig_path | ||||
|  | ||||
|   defaultDebug = os.getenv('EXTRACTED_FILE_HTTP_SERVER_DEBUG', 'false') | ||||
|   defaultEncrypt = os.getenv('EXTRACTED_FILE_HTTP_SERVER_ENCRYPT', 'false') | ||||
|   defaultPort = int(os.getenv('EXTRACTED_FILE_HTTP_SERVER_PORT', 8440)) | ||||
|   defaultKey = os.getenv('EXTRACTED_FILE_HTTP_SERVER_KEY', 'quarantined') | ||||
|   defaultDir = os.getenv('EXTRACTED_FILE_HTTP_SERVER_PATH', orig_path) | ||||
|  | ||||
|   parser = argparse.ArgumentParser(description=script_name, add_help=False, usage='{} <arguments>'.format(script_name)) | ||||
|   parser.add_argument('-v', '--verbose', dest='debug', type=str2bool, nargs='?', const=True, default=defaultDebug, metavar='true|false', help=f"Verbose/debug output ({defaultDebug})") | ||||
|   parser.add_argument('-p', '--port', dest='port', help=f"Server port ({defaultPort})", metavar='<port>', type=int, default=defaultPort) | ||||
|   parser.add_argument('-d', '--directory', dest='serveDir', help=f'Directory to serve ({defaultDir})', metavar='<directory>', type=str, default=defaultDir) | ||||
|   parser.add_argument('-e', '--encrypt', dest='encrypt', type=str2bool, nargs='?', const=True, default=defaultEncrypt, metavar='true|false', help=f"Encrypt files with aes-256-cbc ({defaultEncrypt})") | ||||
|   parser.add_argument('-k', '--key', dest='key', help=f"File encryption key", metavar='<str>', type=str, default=defaultKey) | ||||
|   try: | ||||
|     parser.error = parser.exit | ||||
|     args = parser.parse_args() | ||||
|   except SystemExit: | ||||
|     parser.print_help() | ||||
|     exit(2) | ||||
|  | ||||
|   debug = args.debug | ||||
|   if debug: | ||||
|     eprint(os.path.join(script_path, script_name)) | ||||
|     eprint("Arguments: {}".format(sys.argv[1:])) | ||||
|     eprint("Arguments: {}".format(args)) | ||||
|   else: | ||||
|     sys.tracebacklimit = 0 | ||||
|  | ||||
|   Thread(target=serve_on_port, args=[args.serveDir, args.port]).start() | ||||
|  | ||||
| ################################################################################################### | ||||
| if __name__ == '__main__': | ||||
|   main() | ||||
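
For reference (not part of this commit), a file downloaded with --encrypt enabled can be
decrypted with the openssl command from the header comment, or with a small Python
counterpart; this sketch assumes the server's defaults and borrows the EVP_BytesToKey
function defined above:

import hashlib

from Crypto.Cipher import AES

def decrypt_file(in_path, out_path, password='quarantined'):
    # read the OpenSSL-style container written by do_GET above
    with open(in_path, 'rb') as f:
        data = f.read()
    assert data[:8] == b'Salted__'  # OPENSSL_ENC_MAGIC
    salt = data[8:16]               # PKCS5_SALT_LEN bytes of salt
    # derive the key and IV exactly as the server does (KEY_SIZE is 32)
    key, iv = EVP_BytesToKey(32, AES.block_size, hashlib.sha256, salt,
                             password.encode('utf-8'))
    plaintext = AES.new(key, AES.MODE_CBC, iv).decrypt(data[16:])
    with open(out_path, 'wb') as f:
        f.write(plaintext[:-plaintext[-1]])  # strip PKCS#7 padding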
121  Vagrant/resources/malcolm/shared/bin/zeek_install_plugins.sh  Executable file
							| @@ -0,0 +1,121 @@ | ||||
| #!/bin/bash | ||||
|  | ||||
| # Copyright (c) 2021 Battelle Energy Alliance, LLC.  All rights reserved. | ||||
|  | ||||
| if [ -z "$BASH_VERSION" ]; then | ||||
|   echo "Wrong interpreter, please run \"$0\" with bash" | ||||
|   exit 1 | ||||
| fi | ||||
|  | ||||
| SPICY_DIR=${SPICY_DIR:-/opt/spicy} | ||||
| ZEEK_DIR=${ZEEK_DIR:-/opt/zeek} | ||||
|  | ||||
| # going to clone under /usr/local/src | ||||
| SRC_BASE_DIR="/usr/local/src" | ||||
| mkdir -p "$SRC_BASE_DIR" | ||||
|  | ||||
| # | ||||
| # get_latest_github_tagged_release | ||||
| # | ||||
| # get the latest GitHub release tag name given a github repo URL | ||||
| # | ||||
| function get_latest_github_tagged_release() { | ||||
|   REPO_URL="$1" | ||||
|   REPO_NAME="$(echo "$REPO_URL" | sed 's|.*github\.com/||')" | ||||
|   LATEST_URL="https://github.com/$REPO_NAME/releases/latest" | ||||
|   REDIRECT_URL="$(curl -fsSLI -o /dev/null -w %{url_effective} "$LATEST_URL" 2>/dev/null)" | ||||
|   if [[ "$LATEST_URL" = "$REDIRECT_URL"/latest ]]; then | ||||
|     echo "" | ||||
|   else | ||||
|     echo "$REDIRECT_URL" | sed 's|.*tag/||' | ||||
|   fi | ||||
| } | ||||
|  | ||||
| # | ||||
| # clone_github_repo | ||||
| # | ||||
| # clone the latest GitHub release tag if available (else, master/HEAD) under $SRC_BASE_DIR | ||||
| # the release tag/branch can be overridden by appending the branch name after the URL, delimited by a | | ||||
| # | ||||
| function clone_github_repo() { | ||||
|   URL_PARAM="$1" | ||||
|   URL_BRANCH_DELIM='|' | ||||
|   URL_BRANCH_DELIM_COUNT="$(awk -F"${URL_BRANCH_DELIM}" '{print NF-1}' <<< "${URL_PARAM}")" | ||||
|   if (( $URL_BRANCH_DELIM_COUNT > 0 )); then | ||||
|     REPO_URL="$(echo "$URL_PARAM" | cut -d'|' -f1)" | ||||
|     BRANCH_OVERRIDE="$(echo "$URL_PARAM" | cut -d'|' -f2)" | ||||
|   else | ||||
|     REPO_URL="$URL_PARAM" | ||||
|     BRANCH_OVERRIDE="" | ||||
|   fi | ||||
|   if [[ -n $REPO_URL ]]; then | ||||
|     if [[ -n $BRANCH_OVERRIDE ]]; then | ||||
|       REPO_LATEST_RELEASE="$BRANCH_OVERRIDE" | ||||
|     else | ||||
|       REPO_LATEST_RELEASE="$(get_latest_github_tagged_release "$REPO_URL")" | ||||
|     fi | ||||
|     SRC_DIR="$SRC_BASE_DIR"/"$(echo "$REPO_URL" | sed 's|.*/||')" | ||||
|     rm -rf "$SRC_DIR" | ||||
|     if [[ -n $REPO_LATEST_RELEASE ]]; then | ||||
|       git -c core.askpass=true clone --depth=1 --single-branch --branch "$REPO_LATEST_RELEASE" --recursive --shallow-submodules "$REPO_URL" "$SRC_DIR" >/dev/null 2>&1 | ||||
|     else | ||||
|       git -c core.askpass=true clone --depth=1 --single-branch --recursive --shallow-submodules "$REPO_URL" "$SRC_DIR" >/dev/null 2>&1 | ||||
|     fi | ||||
|     [ $? -eq 0 ] && echo "$SRC_DIR" || echo "cloning \"$REPO_URL\" failed" >&2 | ||||
|   fi | ||||
| } | ||||
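|  | ||||
| # for example (both forms appear in ZKG_GITHUB_URLS below): | ||||
| #   clone_github_repo "https://github.com/salesforce/ja3"                          # latest release tag | ||||
| #   clone_github_repo "https://github.com/corelight/zeek-xor-exe-plugin|master"    # pinned branch | ||||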
|  | ||||
| # don't consume as many resources when building spicy-analyzers, even if it's slower. | ||||
| # https://github.com/zeek/spicy-analyzers/pull/60 | ||||
| export SPICY_ZKG_PROCESSES=1 | ||||
|  | ||||
| # install Zeek packages that install nicely using zkg | ||||
| ZKG_GITHUB_URLS=( | ||||
|   "https://github.com/0xl3x1/zeek-EternalSafety" | ||||
|   "https://github.com/0xxon/cve-2020-0601" | ||||
|   "https://github.com/0xxon/cve-2020-13777" | ||||
|   "https://github.com/amzn/zeek-plugin-profinet" | ||||
|   "https://github.com/amzn/zeek-plugin-s7comm" | ||||
|   "https://github.com/amzn/zeek-plugin-tds" | ||||
|   "https://github.com/cisagov/icsnpp-bacnet" | ||||
|   "https://github.com/cisagov/icsnpp-bsap" | ||||
|   "https://github.com/cisagov/icsnpp-dnp3" | ||||
|   "https://github.com/cisagov/icsnpp-enip" | ||||
|   "https://github.com/cisagov/icsnpp-ethercat" | ||||
|   "https://github.com/cisagov/icsnpp-modbus" | ||||
|   "https://github.com/corelight/callstranger-detector" | ||||
|   "https://github.com/corelight/CVE-2020-16898" | ||||
|   "https://github.com/corelight/CVE-2021-31166" | ||||
|   "https://github.com/corelight/pingback" | ||||
|   "https://github.com/corelight/ripple20" | ||||
|   "https://github.com/corelight/SIGRed" | ||||
|   "https://github.com/corelight/zeek-community-id" | ||||
|   "https://github.com/corelight/zeek-xor-exe-plugin|master" | ||||
|   "https://github.com/corelight/zerologon" | ||||
|   "https://github.com/cybera/zeek-sniffpass" | ||||
|   "https://github.com/J-Gras/zeek-af_packet-plugin" | ||||
|   "https://github.com/mitre-attack/bzar" | ||||
|   "https://github.com/mmguero-dev/GQUIC_Protocol_Analyzer|topic/zeek-4-compat" | ||||
|   "https://github.com/precurse/zeek-httpattacks" | ||||
|   "https://github.com/salesforce/hassh" | ||||
|   "https://github.com/salesforce/ja3" | ||||
|   "https://github.com/mmguero-dev/spicy-analyzers" | ||||
| ) | ||||
| for i in "${ZKG_GITHUB_URLS[@]}"; do | ||||
|   SRC_DIR="$(clone_github_repo "$i")" | ||||
|   [[ -d "$SRC_DIR" ]] && zkg install --force --skiptests "$SRC_DIR" | ||||
| done | ||||
|  | ||||
| # TODO | ||||
| # https://github.com/zeek/spicy-analyzers | ||||
| # A collection of zeek-hosted spicy analyzers, some of which | ||||
| # "replace" the built-in zeek parsers for those protocols. | ||||
| # We need to compare them against the built-in parsers, but keep using what we're | ||||
| # used to until we can make that decision with eyes open. As of 2021/03/24, that list is: | ||||
| # - DHCP      - compare to Zeek DHCP | ||||
| # - DNS       - compare to Zeek DNS | ||||
| # - HTTP      - compare to Zeek HTTP | ||||
| # - IPSEC | ||||
| # - OpenVPN | ||||
| # - TFTP | ||||
| # - WireGuard | ||||