added Malcolm
Vagrant/resources/malcolm/logstash/certs/ca.crt (new file, 20 lines)
@@ -0,0 +1,20 @@
-----BEGIN CERTIFICATE-----
MIIDUTCCAjmgAwIBAgIURx/1ZsLxqAK+T1Oa61Q1h3oCHeIwDQYJKoZIhvcNAQEL
BQAwODELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAklEMQ8wDQYDVQQKDAZzZW5zb3Ix
CzAJBgNVBAsMAmNhMB4XDTIxMDgwNTE0MTc0MloXDTQ4MTIyMDE0MTc0MlowODEL
MAkGA1UEBhMCVVMxCzAJBgNVBAgMAklEMQ8wDQYDVQQKDAZzZW5zb3IxCzAJBgNV
BAsMAmNhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnUhTAcT8/FIf
s0EyJSkF+R334psztsY62lbj7Oh9l4EtQRk6xWebk2kl256mPfBd4Dj7FozjbGui
4otMd6tzmrw1BEhE1qRB4/tdGl/KANnGMdaWwr7qDmu5jPFrXDawRMfce8ruaQy7
n78kfw289XvaDPYZ/FB02AIgNjK/XgtvgnGZs/Fo7PGUOccHLK9FxoAXAdrVO9we
3IRys/beaFiOMWaVDyqO/hlrwAprQi+sESWcmbXU5KA3pOlcU7KdtlzgQpxw1zVf
CocQ8hialGL+U1ypz6vzAPS49kg3/VAUTojvQioUsnPPhm1CRS8DTL4o0/kk63BT
i0kBg7S9KwIDAQABo1MwUTAdBgNVHQ4EFgQUAz1iXD+iAOMmCT2SzZfiF1GqKccw
HwYDVR0jBBgwFoAUAz1iXD+iAOMmCT2SzZfiF1GqKccwDwYDVR0TAQH/BAUwAwEB
/zANBgkqhkiG9w0BAQsFAAOCAQEAJ84aTY+0M9C7gYoEr6fklTLaiVIkSFSJZeam
+JJ0DfI2Fyc4Qu53mr9ZaWZ6qUAyun1/i590WcG8h00dQCNKy27NvI9it8o/HSNS
jnuDaXZ8Uv/u+r3vhma/BCuR5J0DQJ5t3mlJmy3VjIRDzAUhrAZFvqcO8xQDX/4l
CmCST4ACv6x/9pyzQcVTO0JjFiOaDpybC+nwuhSgr00lTdi0Hk0yHcIPlO2gXouY
uO4FLfFWkiPULN9HhW5g/bWNGIdKQDrV5Tr7b5q0lC5gnyevTV86Bl7AEfU7Rbfe
yJvG/0Wt+JC25Zrn7/bqYjj4yv+sppE5WcyCFJzC+7yTkg7wcQ==
-----END CERTIFICATE-----
Vagrant/resources/malcolm/logstash/certs/ca.key (new file, 27 lines)
@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAnUhTAcT8/FIfs0EyJSkF+R334psztsY62lbj7Oh9l4EtQRk6
xWebk2kl256mPfBd4Dj7FozjbGui4otMd6tzmrw1BEhE1qRB4/tdGl/KANnGMdaW
wr7qDmu5jPFrXDawRMfce8ruaQy7n78kfw289XvaDPYZ/FB02AIgNjK/XgtvgnGZ
s/Fo7PGUOccHLK9FxoAXAdrVO9we3IRys/beaFiOMWaVDyqO/hlrwAprQi+sESWc
mbXU5KA3pOlcU7KdtlzgQpxw1zVfCocQ8hialGL+U1ypz6vzAPS49kg3/VAUTojv
QioUsnPPhm1CRS8DTL4o0/kk63BTi0kBg7S9KwIDAQABAoIBACbyFbvGb257V2Yn
qqcQ9W2uQgdWrwN/KmuOQHoVR6QQVIheGvNuqFNkIf+z1S5ZKqWu9l2rHVVngbnf
DFxFlCaom+gnjQQpQDeXsoIpXfe/9Re6LzKh24semZ+Q2CjJUfR2GN5+bXKNuJfh
2MOtPzD6LhAvkFeuBIDIZfmFr2U1xEMvPQ4XJd+nKupXEoaCnhIil3HSMmQ8mXSI
kJULaxhnzrDqlozxjy0//aWXlJzfPrY4gnK3H/vtkv+5oniXN3Erid/3KU+wkhdk
Ym7QmaktH10oOPGllcemJdyXKuDmg/f0r2DQ6AOMpuWW20tpxF/dSKEqdYbZMuTo
PruZo4ECgYEAz9yZzses30amdoOm9hm1/84/wftFQBdpWOkZezzwLyTQ1MZvkATQ
QcvFNGMuoAIahT9l6unCuDZpvaOHgOPt600+IiRceoxB9yAdrgRHnYhbL5lc3fQx
IJczoM+JFijHUGI6NWmVOESwP2Tr+r0GaUklaGWXEr6PhmIVKMWi2bECgYEAwbUO
4DrjXO4V8dUjtn/eIXxPk94cXNFsb7y9nHK8OTR3ZTrcpubDHC+g9tlGJaqpghpj
3zH5whUrMs6jov1G35WvcqhMT0h1zqT2MtvZDtA66SE9GN+7EU4eBJwpNoa1fXat
Yz9+ksht5Yi11BWtTlSXi4OZblhPxKwWUjbGn5sCgYA1oggVsLPMUKB3B/U2nt+1
B3SwCrONm/zUsiCfCdQ3ddpkbGcw0+0gbQGPSOAo7SIGq5sSiAp0GaYjBmTaPvWO
tzmG+bba8AG9UA4YboZEAYD1wS6THoTmkd0Ca0pF63OyLAY07e9Qu3h2gwXYhpl2
5Sa/zyax8XV7CSfIg2ZHoQKBgQCT7fKjWTWDJauKmKwCVlpMFkQrd4cPdzuphWkE
TjbiNOgtXatepoJARztU6I8CeveijQbST7cd+c6YqWgcM/JpE0X+ePp13a9iFWaT
ZoO4JVun6CNhmJKi35ZbDOsIYg299+DoF3hRHlGW5jV99uP4Gu/0spZ8x2J7nrQZ
Kw26mwKBgAZFdJUgIYYGvjdelUUfOhoQXccr3zsWqpl9iJt7+jCs/p/hVMQtgMPS
9wxw7Ilb9s8Q6n44E8Yc4pcnN3n5QeEwjCEO7ZOsiDy75iOIsIJdADdORUypiIPT
E1VHvilTWV/UyA1HuDGWC78rhDY1zWq0tfossy9mO8XkwTtFKROf
-----END RSA PRIVATE KEY-----
Vagrant/resources/malcolm/logstash/certs/client.conf (new file, 28 lines)
@@ -0,0 +1,28 @@
# Copyright (c) 2021 Battelle Energy Alliance, LLC. All rights reserved.

# one may wish to consider not using self-signed certificates in production

[req]
distinguished_name = req_distinguished_name
req_extensions = v3_req
prompt = no

[req_distinguished_name]
countryName = US
stateOrProvinceName = ID
organizationName = sensor
organizationalUnitName = beats

[ usr_cert ]
# Extensions for server certificates (`man x509v3_config`).
basicConstraints = CA:FALSE
nsCertType = client, server
nsComment = "OpenSSL Beats Server / Client Certificate"
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer:always
keyUsage = critical, digitalSignature, keyEncipherment, keyAgreement, nonRepudiation
extendedKeyUsage = serverAuth, clientAuth

[v3_req]
keyUsage = keyEncipherment, dataEncipherment
extendedKeyUsage = serverAuth, clientAuth
Vagrant/resources/malcolm/logstash/certs/server.conf (new file, 18 lines)
@@ -0,0 +1,18 @@
# Copyright (c) 2021 Battelle Energy Alliance, LLC. All rights reserved.

# one may wish to consider not using self-signed certificates in production

[req]
distinguished_name = req_distinguished_name
req_extensions = v3_req
prompt = no

[req_distinguished_name]
countryName = US
stateOrProvinceName = ID
organizationName = sensor
organizationalUnitName = logstash

[v3_req]
keyUsage = keyEncipherment, dataEncipherment
extendedKeyUsage = serverAuth
Vagrant/resources/malcolm/logstash/certs/server.crt (new file, 19 lines)
@@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDKDCCAhCgAwIBAgIUc//014VwkY4+XcAFLYTsPuuNQT8wDQYJKoZIhvcNAQEN
BQAwODELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAklEMQ8wDQYDVQQKDAZzZW5zb3Ix
CzAJBgNVBAsMAmNhMB4XDTIxMDgwNTE0MTc0MloXDTMxMDgwMzE0MTc0MlowPjEL
MAkGA1UEBhMCVVMxCzAJBgNVBAgMAklEMQ8wDQYDVQQKDAZzZW5zb3IxETAPBgNV
BAsMCGxvZ3N0YXNoMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyLwm
H/ygtAReGswYMMXvjbJ/bZAt9rhv3NH8IZe8UNFtdj5BZCyW7ELhFYij4WdPBrKu
8/JjGY4baJRfWfymycxgMc+vLnu+LYGrCgMGEbPKeL1mwtCPBkL0YdA98rmjxOu1
33VT3h4FMJjpnUUlOYwvXv7f956QaQRoV/j8dBV4fy0BWFARlg0/m15bZjJXCnEv
cmXc9snVJSP+qCrq9al7/lhWD3fkIWTvZUHl9d9HvGxtjhzKon/fxArNBBinMzjt
SYq8BkB4M1Ufa5Ak2WG8vJCmx8moJVTwpvdHa0tdNTdT49sM/bFif1N9VXeru2+C
POoNOvYDUCvkBOYC+wIDAQABoyQwIjALBgNVHQ8EBAMCBDAwEwYDVR0lBAwwCgYI
KwYBBQUHAwEwDQYJKoZIhvcNAQENBQADggEBACH8MIi0lm3pS+fXPqICTXPt6EOy
qCO0otxTcxyYWlsZn/2790BZkX1rQTDGqa3GyMEV1MCAofeh/8IHm2CLT9VqY6hq
OXQ2W6XPdnWBXmqetjfi5ZOzUGNtxlIBBnFBF8c+DoQ7jkjfs5rDBrpAbKfULWxr
MNQLeDaYzVNMiLH3Dlkb9alqTbIBeqG9s0z4iyc2n1O4GH/i1o3RilUJ8tWoR/qp
Cfx9+u2+fYrFnXNuSAf4NlfqFrt+6gQGN6kc5tP/6hsjDSuiTREmGNljBwkW9Wle
DFHXXjHubP8SMPOsZF/g49TmpNdQ3ZZCgAJU3JPAGIsUZp8FdiHtFRMfE0s=
-----END CERTIFICATE-----
Vagrant/resources/malcolm/logstash/certs/server.key (new file, 28 lines)
@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDIvCYf/KC0BF4a
zBgwxe+Nsn9tkC32uG/c0fwhl7xQ0W12PkFkLJbsQuEViKPhZ08Gsq7z8mMZjhto
lF9Z/KbJzGAxz68ue74tgasKAwYRs8p4vWbC0I8GQvRh0D3yuaPE67XfdVPeHgUw
mOmdRSU5jC9e/t/3npBpBGhX+Px0FXh/LQFYUBGWDT+bXltmMlcKcS9yZdz2ydUl
I/6oKur1qXv+WFYPd+QhZO9lQeX130e8bG2OHMqif9/ECs0EGKczOO1JirwGQHgz
VR9rkCTZYby8kKbHyaglVPCm90drS101N1Pj2wz9sWJ/U31Vd6u7b4I86g069gNQ
K+QE5gL7AgMBAAECggEBAMI91oKTWgp4Q3uYzTu8/W7XMBmcL+4TUSOUCm+TPcXS
siJ+YZaG01Ax8058F2DRMBZiJ4ulnCGpcnpDQPV51wdk/hyVRIYtdbzhQdoBTQkq
Oh2V4W9Yk13Swlo4QQm0e3XdRPPDu/EuiYUaIs1tkHTcYyaAacdaJ5GHk1S1ecz1
FxLWYYye0JLYqfIxucYMAYQPGY0cBKu0x9dyqgqd1VdwpfoSJSUA8JComcBa5jUW
PkL68jd7wKD8rUEQHPq7oemSo240YtDOxTxjosQdbGCb/UwUVnJCVC4ZqRUvRgvb
I2EIdAtiHnisIIZdgjXI4TWZC66KZm1WMro8IylALTkCgYEA5ShISldKUdF8yNC2
Kau9aQxvooqe6SOBZz0hweGVyTVQitnd+DDeaRXJSWw+KRBJr67WyaFmHOghnPCm
ianE7J0eeX8wwmdkF2+sITg0Rh2t1y6Hsoi2UbADNm1B2QNVTYGRnsS/JCekmG49
MPI2kvMQZnfWKMtUrFZVb3Qtf4UCgYEA4D+RLixtq8ViAGnyAbtm+e7G4KOpkt4A
tbYi69I2hrW+LKlrp+Me0SW0fz59Jk0QsJykk1obMJS+h21+dBj1BxDyuv7mHI7b
UfcAFOiRniDqTPf1Eyx5mXmcaTljbF30Cbfzf/U4AYvKhmBL6I+plrQvxrRgkzJz
TuR+777wwH8CgYEA4w8z+f1eFY32D+DF7qpN5o28+5hQ9v8IVGw9S6ejTBg4syxH
BYoeho9gwBCx0uxhpuMdzfHPhSr5S1EkItmF11GjhY1ime3qPlKXgt725Na5jrJ7
IIrX5D78H+zIRyhHWKHe0StvnXpuW983YI1PR/HxuSZanFdEkfKFewbpoiUCgYA7
yZbqiZvf3StI0vZb8dv9rOTHRbtTVe84g0wCNbSl69S9UZpLU6mGt5fNCRsS2B8o
B0t1oeN362B0+QNq7rB9Zs5gfs9ZM9lTnBggevVABKSTStTDfOsauSqzf0J7xPzo
jmvUZMu+1cd0Vj5Gze64KGhIgRTyidPmZ9NUFkNX/wKBgQDRw1AZuhPNlt8fkhiL
czoXzczpqjUIe0yQ3OfZbKZz1IH55qR0KTRgAU0woy0XVwArssUwdOkgspZZ35mp
jHFsU7y1hcZNm80l1gmkHWZVoxPTf6P869JFJa1k6gw8kYZ2a4F1D2LY11MU3Jpy
w4wWKuqPzZLcYtkpBljyONfLsg==
-----END PRIVATE KEY-----
Vagrant/resources/malcolm/logstash/config/log4j2.properties (new file, 20 lines)
@@ -0,0 +1,20 @@
status = error
name = LogstashPropertiesConfig

appender.console.type = Console
appender.console.name = plain_console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %m%n

appender.json_console.type = Console
appender.json_console.name = json_console
appender.json_console.layout.type = JSONLayout
appender.json_console.layout.compact = true
appender.json_console.layout.eventEol = true

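# ls.log.level and ls.log.format are system properties set by Logstash at startup;
# the format value selects the plain_console or json_console appender defined above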
rootLogger.level = ${sys:ls.log.level}
rootLogger.appenderRef.console.ref = ${sys:ls.log.format}_console

# the Dissector filter logs very noisily; only surface its errors
logger.ShutUpDissector.name = org.logstash.dissect.Dissector
logger.ShutUpDissector.level = error
Vagrant/resources/malcolm/logstash/config/logstash.yml (new file, 2 lines)
@@ -0,0 +1,2 @@
http.host: "0.0.0.0"

Vagrant/resources/malcolm/logstash/maps/conn_states.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
"S0": "Connection attempt seen, no reply"
"S1": "Connection established, not terminated"
"S2": "Connection established and close attempt by originator seen (but no reply from responder)"
"S3": "Connection established and close attempt by responder seen (but no reply from originator)"
"SF": "Normal SYN/FIN completion"
"REJ": "Connection attempt rejected"
"RSTO": "Connection established, originator aborted (sent a RST)"
"RSTR": "Established, responder aborted"
"RSTOS0": "Originator sent a SYN followed by a RST, we never saw a SYN-ACK from the responder"
"RSTRH": "Responder sent a SYN ACK followed by a RST, we never saw a SYN from the (purported) originator"
"SH": "Originator sent a SYN followed by a FIN, we never saw a SYN ACK from the responder (hence the connection was 'half' open)"
"SHR": "Responder sent a SYN ACK followed by a FIN, we never saw a SYN from the originator"
"OTH": "No SYN seen, just midstream traffic (a 'partial connection' that was not later closed)"
@@ -0,0 +1,49 @@
"110": "Restart marker replay"
"120": "Service ready in n minutes"
"125": "Data connection already open; transfer starting"
"150": "Opening data connection"
"200": "Success"
"202": "Command not implemented"
"211": "System status or help reply"
"212": "Directory status"
"213": "File status"
"214": "Help message"
"215": "System type"
"220": "Service ready for new user"
"221": "Service closing control connection"
"225": "Data connection open; no transfer in progress"
"226": "Closing data connection"
"227": "Entering Passive Mode"
"228": "Entering Long Passive Mode"
"229": "Entering Extended Passive Mode"
"230": "User logged in"
"231": "User logged out"
"232": "Logout command noted"
"234": "Authentication method accepted"
"250": "Successful file action"
"257": "Successful directory creation"
"331": "Username okay, need password"
"332": "Need account for login"
"350": "Requested file action pending further information"
"421": "Service not available"
"425": "Can't open data connection"
"426": "Connection closed; transfer aborted"
"430": "Invalid username or password"
"434": "Requested host unavailable"
"450": "Requested file action not taken"
"451": "Requested action aborted; local error in processing"
"452": "Requested action not taken; file system error"
"501": "Syntax error"
"502": "Command not implemented"
"503": "Bad command sequence"
"504": "Command not implemented for that parameter"
"530": "Not logged in"
"532": "Need account for storing files"
"534": "Could not connect; policy requires SSL"
"550": "Requested action not taken; file unavailable"
"551": "Requested action aborted; page type unknown"
"552": "Requested file action aborted; exceeded storage allocation"
"553": "Requested action not taken; file name not allowed"
"631": "Integrity protected"
"632": "Confidentiality and integrity protected"
"633": "Confidentiality protected"
@@ -0,0 +1,97 @@
"100": "Continue"
"101": "Switching Protocols"
"102": "Processing"
"103": "Early Hints"
"110": "Response is Stale"
"111": "Revalidation Failed"
"112": "Disconnected Operation"
"113": "Heuristic Expiration"
"199": "Miscellaneous Warning"
"200": "Success"
"201": "Created"
"202": "Accepted"
"203": "Non-authoritative Information"
"204": "No Content"
"205": "Reset Content"
"206": "Partial Content"
"207": "Multi-status"
"208": "Already Reported"
"214": "Transformation Applied"
"218": "This Is Fine"
"226": "IM Used"
"299": "Miscellaneous Persistent Warning"
"300": "Multiple Choices"
"301": "Moved Permanently"
"302": "Redirect"
"303": "See Other"
"304": "Not Modified"
"305": "Use Proxy"
"306": "Switch Proxy"
"307": "Temporary Redirect"
"308": "Permanent Redirect"
"400": "Bad Request"
"401": "Not Authorized"
"402": "Payment Required"
"403": "Forbidden"
"404": "Not Found"
"405": "Method Not Allowed"
"406": "Not Acceptable"
"407": "Proxy Authentication Required"
"408": "Request Timeout"
"409": "Conflict"
"410": "Gone"
"411": "Length Required"
"412": "Precondition Failed"
"413": "Payload Too Large"
"414": "URI Too Long"
"415": "Unsupported Media Type"
"416": "Range Not Satisfiable"
"417": "Expectation Failed"
"418": "I'm A Teapot"
"419": "Page Expired"
"420": "Method Failure / Enhance Your Calm"
"421": "Misdirected Request"
"422": "Unprocessable Entity"
"423": "Locked"
"424": "Failed Dependency"
"425": "Too Early"
"426": "Upgrade Required"
"427": "Unassigned"
"428": "Precondition Required"
"429": "Too Many Requests"
"430": "Unassigned"
"431": "Request Header Fields Too Large"
"440": "Login Timeout"
"444": "No Response"
"449": "Retry With"
"450": "Blocked By Windows Parental Controls"
"451": "Unavailable For Legal Reasons"
"494": "Request Header Too Large"
"495": "SSL Certificate Error"
"496": "SSL Certificate Required"
"497": "HTTP Request Sent To HTTPS Port"
"498": "Invalid Token"
"499": "Client Closed Request"
"500": "Internal Server Error"
"501": "Unsupported Method"
"502": "Bad Gateway"
"503": "Service Unavailable"
"504": "Gateway Timeout"
"505": "HTTP Version Not Supported"
"506": "Variant Also Negotiates"
"507": "Insufficient Storage"
"508": "Loop Detected"
"509": "Bandwidth Limit Exceeded"
"510": "Not Extended"
"511": "Network Authentication Required"
"520": "Web Server Returned An Unknown Error"
"521": "Web Server Is Down"
"522": "Connection Timed Out"
"523": "Origin Is Unreachable"
"524": "A Timeout Occurred"
"525": "SSL Handshake Failed"
"526": "Invalid SSL Certificate"
"527": "Railgun Error"
"529": "Site Is Overloaded"
"530": "Site Is Frozen"
@@ -0,0 +1,56 @@
"ip": "0"
"hopopt": "0"
"icmp": "1"
"igmp": "2"
"ggp": "3"
"ipencap": "4"
"st": "5"
"tcp": "6"
"egp": "8"
"igp": "9"
"pup": "12"
"udp": "17"
"hmp": "20"
"xns-idp": "22"
"rdp": "27"
"iso-tp4": "29"
"dccp": "33"
"xtp": "36"
"ddp": "37"
"idpr-cmtp": "38"
"ipv6": "41"
"ipv6-route": "43"
"ipv6-frag": "44"
"idrp": "45"
"rsvp": "46"
"gre": "47"
"esp": "50"
"ah": "51"
"skip": "57"
"ipv6-icmp": "58"
"ipv6-nonxt": "59"
"ipv6-opts": "60"
"rspf": "73"
"vmtp": "81"
"eigrp": "88"
"ospf": "89"
"ax.25": "93"
"ipip": "94"
"etherip": "97"
"encap": "98"
"#": "99"
"pim": "103"
"ipcomp": "108"
"vrrp": "112"
"l2tp": "115"
"isis": "124"
"sctp": "132"
"fc": "133"
"mobility-header": "135"
"udplite": "136"
"mpls-in-ip": "137"
"manet": "138"
"hip": "139"
"shim6": "140"
"wesp": "141"
"rohc": "142"
@@ -0,0 +1,15 @@
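# MITRE ATT&CK tactic IDs, keyed by tactic name as it appears in Zeek notices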
"Collection": "TA0009"
"Command_and_Control": "TA0011"
"Credential_Access": "TA0006"
"Defense_Evasion": "TA0005"
"Discovery": "TA0007"
"Execution": "TA0002"
"Exfiltration": "TA0010"
"Impact": "TA0040"
"Initial_Access": "TA0001"
"Lateral_Movement": "TA0008"
"Lateral_Movement_and_Execution": [ "TA0002", "TA0008" ]
"Lateral_Movement_Extracted_File": "TA0008"
"Lateral_Movement_Multiple_Attempts": "TA0008"
"Persistence": "TA0003"
"Privilege_Escalation": "TA0004"
@@ -0,0 +1,15 @@
"Collection": "https://attack.mitre.org/tactics/TA0009/"
"Command_and_Control": "https://attack.mitre.org/tactics/TA0011/"
"Credential_Access": "https://attack.mitre.org/tactics/TA0006/"
"Defense_Evasion": "https://attack.mitre.org/tactics/TA0005/"
"Discovery": "https://attack.mitre.org/tactics/TA0007/"
"Execution": "https://attack.mitre.org/tactics/TA0002/"
"Exfiltration": "https://attack.mitre.org/tactics/TA0010/"
"Impact": "https://attack.mitre.org/tactics/TA0040/"
"Initial_Access": "https://attack.mitre.org/tactics/TA0001/"
"Lateral_Movement": "https://attack.mitre.org/tactics/TA0008/"
"Lateral_Movement_and_Execution": [ "https://attack.mitre.org/tactics/TA0002/", "https://attack.mitre.org/tactics/TA0008/" ]
"Lateral_Movement_Extracted_File": "https://attack.mitre.org/tactics/TA0008/"
"Lateral_Movement_Multiple_Attempts": "https://attack.mitre.org/tactics/TA0008/"
"Persistence": "https://attack.mitre.org/tactics/TA0003/"
"Privilege_Escalation": "https://attack.mitre.org/tactics/TA0004/"
@@ -0,0 +1,7 @@
"EternalSafety": "Lexi Brent"
"ATTACK": "MITRE"
"HTTPATTACKS": "Andrew Klaus"
"Corelight": "Corelight"
"SNIFFPASS": "Andrew Klaus"
"CVE_2020_0601": "Johanna Amann"
"CVE_2020_13777": "Johanna Amann"
@@ -0,0 +1,7 @@
"EternalSafety": "BSD-3-Clause License"
"ATTACK": "BSD-3-Clause License"
"HTTPATTACKS": "BSD-2-Clause License"
"SNIFFPASS": "BSD-3-Clause License"
"Corelight": "https://github.com/corelight"
"CVE_2020_0601": "https://raw.githubusercontent.com/0xxon/cve-2020-0601/master/COPYING"
"CVE_2020_13777": "https://raw.githubusercontent.com/0xxon/cve-2020-13777/master/COPYING"
@@ -0,0 +1,7 @@
"EternalSafety": "https://github.com/0xl3x1/zeek-EternalSafety"
"ATTACK": "https://github.com/mitre-attack/bzar"
"HTTPATTACKS": "https://github.com/precurse/zeek-httpattacks"
"Corelight": "https://github.com/corelight"
"SNIFFPASS": "https://github.com/cybera/zeek-sniffpass"
"CVE_2020_0601": "https://github.com/0xxon/cve-2020-0601"
"CVE_2020_13777": "https://github.com/0xxon/cve-2020-13777"
Vagrant/resources/malcolm/logstash/maps/ntp_modes.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
"1": "client"
"2": "server"
"3": "peer"
"4": "broadcast/multicast"
Vagrant/resources/malcolm/logstash/maps/s7comm_result_codes.yaml (new file, 206 lines)
@@ -0,0 +1,206 @@
"0": "Success"
"272": "Invalid block number"
"273": "Invalid request length"
"274": "Invalid parameter"
"275": "Invalid block type"
"276": "Block not found"
"277": "Block already exists"
"278": "Block is write-protected"
"279": "The block/operating system update is too large"
"280": "Invalid block number"
"281": "Incorrect password entered"
"282": "PG resource error"
"283": "PLC resource error"
"284": "Protocol error"
"285": "Too many blocks (module-related restriction)"
"286": "There is no longer a connection to the database, or S7DOS handle is invalid"
"287": "Result buffer too small"
"288": "End of block list"
"320": "Insufficient memory available"
"321": "Job cannot be processed because of a lack of resources"
"32769": "The requested service cannot be performed while the block is in the current status"
"32771": "S7 protocol error: Error occurred while transferring the block"
"33024": "Application, general error: Service unknown to remote module"
"33028": "This service is not implemented on the module or a frame error was reported"
"33284": "The type specification for the object is inconsistent"
"33285": "A copied block already exists and is not linked"
"33537": "Insufficient memory space or work memory on the module, or specified storage medium not accessible"
"33538": "Too few resources available or the processor resources are not available"
"33540": "No further parallel upload possible. There is a resource bottleneck"
"33541": "Function not available"
"33542": "Insufficient work memory (for copying, linking, loading AWP)"
"33543": "Not enough retentive work memory (for copying, linking, loading AWP)"
"33793": "S7 protocol error: Invalid service sequence (for example, loading or uploading a block)"
"33794": "Service cannot execute owing to status of the addressed object"
"33796": "S7 protocol: The function cannot be performed"
"33797": "Remote block is in DISABLE state (CFB). The function cannot be performed"
"34048": "S7 protocol error: Wrong frames"
"34051": "Alarm from the module: Service canceled prematurely"
"34561": "Error addressing the object on the communications partner (for example, area length error)"
"34562": "The requested service is not supported by the module"
"34563": "Access to object refused"
"34564": "Access error: Object damaged"
"53249": "Protocol error: Illegal job number"
"53250": "Parameter error: Illegal job variant"
"53251": "Parameter error: Debugging function not supported by module"
"53252": "Parameter error: Illegal job status"
"53253": "Parameter error: Illegal job termination"
"53254": "Parameter error: Illegal link disconnection ID"
"53255": "Parameter error: Illegal number of buffer elements"
"53256": "Parameter error: Illegal scan rate"
"53257": "Parameter error: Illegal number of executions"
"53258": "Parameter error: Illegal trigger event"
"53259": "Parameter error: Illegal trigger condition"
"53265": "Parameter error in path of the call environment: Block does not exist"
"53266": "Parameter error: Wrong address in block"
"53268": "Parameter error: Block being deleted/overwritten"
"53269": "Parameter error: Illegal tag address"
"53270": "Parameter error: Test jobs not possible, because of errors in user program"
"53271": "Parameter error: Illegal trigger number"
"53285": "Parameter error: Invalid path"
"53286": "Parameter error: Illegal access type"
"53287": "Parameter error: This number of data blocks is not permitted"
"53297": "Internal protocol error"
"53298": "Parameter error: Wrong result buffer length"
"53299": "Protocol error: Wrong job length"
"53311": "Coding error: Error in parameter section (for example, reserve bytes not equal to 0)"
"53313": "Data error: Illegal status list ID"
"53314": "Data error: Illegal tag address"
"53315": "Data error: Referenced job not found, check job data"
"53316": "Data error: Illegal tag value, check job data"
"53317": "Data error: Exiting the ODIS control is not allowed in HOLD"
"53318": "Data error: Illegal measuring stage during run-time measurement"
"53319": "Data error: Illegal hierarchy in 'Read job list'"
"53320": "Data error: Illegal deletion ID in 'Delete job'"
"53321": "Invalid substitute ID in 'Replace job'"
"53322": "Error executing 'program status'"
"53343": "Coding error: Error in data section (for example, reserve bytes not equal to 0, ...)"
"53345": "Resource error: No memory space for job"
"53346": "Resource error: Job list full"
"53347": "Resource error: Trigger event occupied"
"53348": "Resource error: Not enough memory space for one result buffer element"
"53349": "Resource error: Not enough memory space for several result buffer elements"
"53350": "Resource error: The timer available for run-time measurement is occupied by another job"
"53351": "Resource error: Too many 'modify tag' jobs active (in particular multi-processor operation)"
"53377": "Function not permitted in current mode"
"53378": "Mode error: Cannot exit HOLD mode"
"53409": "Function not permitted in current protection level"
"53410": "Function not possible at present, because a function is running that modifies memory"
"53411": "Too many 'modify tag' jobs active on the I/O (in particular multi-processor operation)"
"53412": "'Forcing' has already been established"
"53413": "Referenced job not found"
"53414": "Job cannot be disabled/enabled"
"53415": "Job cannot be deleted, for example because it is currently being read"
"53416": "Job cannot be replaced, for example because it is currently being read or deleted"
"53417": "Job cannot be read, for example because it is currently being deleted"
"53418": "Time limit exceeded in processing operation"
"53419": "Invalid job parameters in process operation"
"53420": "Invalid job data in process operation"
"53421": "Operating mode already set"
"53422": "The job was set up over a different connection and can only be handled over this connection"
"53441": "At least one error has been detected while accessing the tag(s)"
"53442": "Change to STOP/HOLD mode"
"53443": "At least one error was detected while accessing the tag(s). Mode change to STOP/HOLD"
"53444": "Timeout during run-time measurement"
"53445": "Display of block stack inconsistent, because blocks were deleted/reloaded"
"53446": "Job was automatically deleted as the jobs it referenced have been deleted"
"53447": "The job was automatically deleted because STOP mode was exited"
"53448": "'Block status' aborted because of inconsistencies between test job and running program"
"53449": "Exit the status area by resetting OB90"
"53450": "Exiting the status range by resetting OB90 and access error reading tags before exiting"
"53451": "The output disable for the peripheral outputs has been activated again"
"53452": "The amount of data for the debugging functions is restricted by the time limit"
"53761": "Syntax error in block name"
"53762": "Syntax error in function parameters"
"53765": "Linked block already exists in RAM: Conditional copying is not possible"
"53766": "Linked block already exists in EPROM: Conditional copying is not possible"
"53768": "Maximum number of copied (not linked) blocks on module exceeded"
"53769": "(At least) one of the given blocks not found on the module"
"53770": "The maximum number of blocks that can be linked with one job was exceeded"
"53771": "The maximum number of blocks that can be deleted with one job was exceeded"
"53772": "OB cannot be copied because the associated priority class does not exist"
"53773": "SDB cannot be interpreted (for example, unknown number)"
"53774": "No (further) block available"
"53775": "Module-specific maximum block size exceeded"
"53776": "Invalid block number"
"53778": "Incorrect header attribute (run-time relevant)"
"53779": "Too many SDBs. Note the restrictions on the module being used"
"53782": "Invalid user program - reset module"
"53783": "Protection level specified in module properties not permitted"
"53784": "Incorrect attribute (active/passive)"
"53785": "Incorrect block lengths (for example, incorrect length of first section or of the whole block)"
"53786": "Incorrect local data length or write-protection code faulty"
"53787": "Module cannot compress or compression was interrupted early"
"53789": "The volume of dynamic project data transferred is illegal"
"53790": "Unable to assign parameters to a module (such as FM, CP). The system data could not be linked"
"53792": "Invalid programming language. Note the restrictions on the module being used"
"53793": "The system data for connections or routing are not valid"
"53794": "The system data of the global data definition contain invalid parameters"
"53795": "Error in instance data block for communication function block or maximum number of instance DBs exceeded"
"53796": "The SCAN system data block contains invalid parameters"
"53797": "The DP system data block contains invalid parameters"
"53798": "A structural error occurred in a block"
"53808": "A structural error occurred in a block"
"53809": "At least one loaded OB cannot be copied because the associated priority class does not exist"
"53810": "At least one block number of a loaded block is illegal"
"53812": "Block exists twice in the specified memory medium or in the job"
"53813": "The block contains an incorrect checksum"
"53814": "The block does not contain a checksum"
"53815": "You are about to load the block twice, i.e. a block with the same time stamp already exists on the CPU"
"53816": "At least one of the blocks specified is not a DB"
"53817": "At least one of the DBs specified is not available as a linked variant in the load memory"
"53818": "At least one of the specified DBs is considerably different from the copied and linked variant"
"53824": "Coordination rules violated"
"53825": "The function is not permitted in the current protection level"
"53826": "Protection violation while processing F blocks"
"53840": "Update and module ID or version do not match"
"53841": "Incorrect sequence of operating system components"
"53842": "Checksum error"
"53843": "No executable loader available; update only possible using a memory card"
"53844": "Storage error in operating system"
"53888": "Error compiling block in S7-300 CPU"
"53921": "Another block function or a trigger on a block is active"
"53922": "A trigger is active on a block. Complete the debugging function first"
"53923": "The block is not active (linked), the block is occupied or the block is currently marked for deletion"
"53924": "The block is already being processed by another block function"
"53926": "It is not possible to save and change the user program simultaneously"
"53927": "The block has the attribute 'unlinked' or is not processed"
"53928": "An active debugging function is preventing parameters from being assigned to the CPU"
"53929": "New parameters are being assigned to the CPU"
"53930": "New parameters are currently being assigned to the modules"
"53931": "The dynamic configuration limits are currently being changed"
"53932": "A running active or deactivate assignment (SFC 12) is temporarily preventing R-KiR process"
"53936": "An error occurred while configuring in RUN (CiR)"
"53952": "The maximum number of technological objects has been exceeded"
"53953": "The same technology data block already exists on the module"
"53954": "Downloading the user program or downloading the hardware configuration is not possible"
"54273": "Information function unavailable"
"54274": "Information function unavailable"
"54275": "Service has already been logged on/off (Diagnostics/PMC)"
"54276": "Maximum number of nodes reached. No more logons possible for diagnostics/PMC"
"54277": "Service not supported or syntax error in function parameters"
"54278": "Required information currently unavailable"
"54279": "Diagnostics error occurred"
"54280": "Update aborted"
"54281": "Error on DP bus"
"54785": "Syntax error in function parameter"
"54786": "Incorrect password entered"
"54787": "The connection has already been legitimized"
"54788": "The connection has already been enabled"
"54789": "Legitimization not possible because password does not exist"
"55297": "At least one tag address is invalid"
"55298": "Specified job does not exist"
"55299": "Illegal job status"
"55300": "Illegal cycle time (illegal time base or multiple)"
"55301": "No more cyclic read jobs can be set up"
"55302": "The referenced job is in a state in which the requested function cannot be performed"
"55303": "Function aborted due to overload, meaning executing the read cycle takes longer than the set scan cycle time"
"56321": "Date and/or time invalid"
"57857": "CPU is already the master"
"57858": "Connect and update not possible due to different user program in flash module"
"57859": "Connect and update not possible due to different firmware"
"57860": "Connect and update not possible due to different memory configuration"
"57861": "Connect/update aborted due to synchronization error"
"57862": "Connect/update denied due to coordination violation"
"61185": "S7 protocol error: Error at ID2; only 00H permitted in job"
"61186": "S7 protocol error: Error at ID2; set of resources does not exist"
@@ -0,0 +1,76 @@
"100": "Trying"
"180": "Ringing"
"181": "Call Is Being Forwarded"
"182": "Queued"
"183": "Session Progress"
"199": "Early Dialog Terminated"
"200": "Success"
"202": "Accepted"
"204": "No Notification"
"300": "Multiple Choices"
"301": "Moved Permanently"
"302": "Moved Temporarily"
"305": "Use Proxy"
"380": "Alternative Service"
"400": "Bad Request"
"401": "Unauthorized"
"402": "Payment Required"
"403": "Forbidden"
"404": "Not Found"
"405": "Method Not Allowed"
"406": "Not Acceptable"
"407": "Proxy Authentication Required"
"408": "Request Timeout"
"409": "Conflict"
"410": "Gone"
"411": "Length Required"
"412": "Conditional Request Failed"
"413": "Request Entity Too Large"
"414": "Request URI Too Long"
"415": "Unsupported Media Type"
"416": "Unsupported URI Scheme"
"417": "Unknown Resource-priority"
"420": "Bad Extension"
"421": "Extension Required"
"422": "Session Interval Too Small"
"423": "Interval Too Brief"
"424": "Bad Location Information"
"428": "Use Identity Header"
"429": "Provide Referrer Identity"
"430": "Flow Failed"
"433": "Anonymity Disallowed"
"436": "Bad Identity Info"
"437": "Unsupported Certificate"
"438": "Invalid Identity Header"
"439": "First Hop Lacks Outbound Support"
"440": "Max-breadth Exceeded"
"469": "Bad Info Package"
"470": "Consent Needed"
"480": "Temporarily Unavailable"
"481": "Call/transaction Does Not Exist"
"482": "Loop Detected"
"483": "Too Many Hops"
"484": "Address Incomplete"
"485": "Ambiguous"
"486": "Busy Here"
"487": "Request Terminated"
"488": "Not Acceptable Here"
"489": "Bad Event"
"491": "Request Pending"
"493": "Undecipherable"
"494": "Security Agreement Required"
"500": "Internal Server Error"
"501": "Not Implemented"
"502": "Bad Gateway"
"503": "Service Unavailable"
"504": "Server Time-out"
"505": "Version Not Supported"
"513": "Message Too Large"
"555": "Push Notification Service Not Supported"
"580": "Precondition Failure"
"600": "Busy Everywhere"
"603": "Decline"
"604": "Does Not Exist Anywhere"
"606": "Not Acceptable"
"607": "Unwanted"
"608": "Rejected"
@@ -0,0 +1,40 @@
"101": "The server is unable to connect"
"111": "Connection refused or inability to open an SMTP stream"
"200": "System status message or help reply"
"214": "A response to the HELP command"
"220": "The server is ready"
"221": "The server is closing its transmission channel"
"250": "Success"
"251": "User not local will forward"
"252": "Cannot verify the user, but it will try to deliver the message anyway"
"354": "Start mail input"
"420": "Timeout connection problem"
"421": "Service is unavailable due to a connection problem"
"422": "The recipient's mailbox has exceeded its storage limit"
"431": "Not enough space on the disk"
"432": "Recipient's incoming mail queue has been stopped"
"441": "The recipient's server is not responding"
"442": "The connection was dropped during the transmission"
"446": "The maximum hop count was exceeded for the message"
"447": "Message timed out because of issues concerning the incoming server"
"449": "Routing error"
"450": "User's mailbox is unavailable"
"451": "Aborted – Local error in processing"
"452": "Too many emails sent or too many recipients"
"471": "An error on your mail server"
"500": "Syntax error"
"501": "Syntax error in parameters or arguments"
"503": "Bad sequence of commands, or requires authentication"
"504": "Command parameter is not implemented"
"510": "Bad email address"
"511": "Bad email address"
"512": "Host server for the recipient's domain name cannot be found in DNS"
"513": "Address type is incorrect"
"523": "Associated with encryption in RFC 5248"
"530": "Authentication problem"
"541": "The recipient address rejected your message"
"550": "Non-existent email address"
"551": "User not local or invalid address – relay denied"
"552": "Exceeded storage allocation"
"553": "Mailbox name invalid"
"554": "Transaction has failed"
@@ -0,0 +1,7 @@
"1": "File not found"
"2": "Access violation"
"3": "Disk full or allocation exceeded"
"4": "Illegal operation"
"5": "Unknown transfer ID"
"6": "File already exists"
"7": "No such user"
@@ -0,0 +1,84 @@
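# tags applied to events from each Zeek log type ("ot" = operational technology)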
"bacnet": ["ot", "network"]
"bacnet_discovery": ["ot", "network"]
"bacnet_property": ["ot", "network"]
"bsap_ip_header": ["ot", "network"]
"bsap_ip_rdb": ["ot", "network"]
"bsap_ip_unknown": ["ot", "network"]
"bsap_serial_header": ["ot", "network"]
"bsap_serial_rdb": ["ot", "network"]
"bsap_serial_rdb_ext": ["ot", "network"]
"bsap_serial_unknown": ["ot", "network"]
"cip": ["ot", "network"]
"cip_identity": ["ot", "network"]
"cip_io": ["ot", "network"]
"conn": ["network"]
"dce_rpc": ["network"]
"dhcp": ["network"]
"dnp3": ["ot", "network"]
"dnp3_control": ["ot", "network"]
"dnp3_objects": ["ot", "network"]
"dns": ["network"]
"dpd": ["network"]
"enip": ["ot", "network"]
"ecat_registers": ["ot", "network"]
"ecat_log_address": ["ot", "network"]
"ecat_dev_info": ["ot", "network"]
"ecat_aoe_info": ["ot", "network"]
"ecat_coe_info": ["ot", "network"]
"ecat_foe_info": ["ot", "network"]
"ecat_soe_info": ["ot", "network"]
"ecat_arp_info": ["ot", "network"]
"files": ["file"]
"ftp": ["file", "network"]
"gquic": ["network"]
"http": ["web", "network"]
"intel": ["intrusion_detection", "network"]
"ipsec": ["network"]
"irc": ["network"]
"iso_cotp": ["ot", "network"]
"kerberos": ["authentication", "iam", "network"]
"known_certs": ["file"]
"known_hosts": ["network"]
"known_modbus": ["ot", "network"]
"known_services": ["network"]
"ldap": ["authentication", "iam", "network"]
"login": ["authentication", "network"]
"modbus": ["ot", "network"]
"modbus_detailed": ["ot", "network"]
"modbus_mask_write_register": ["ot", "network"]
"modbus_read_write_multiple_registers": ["ot", "network"]
"modbus_register_change": ["ot", "network"]
"mqtt_connect": ["network"]
"mqtt_publish": ["network"]
"mqtt_subscribe": ["network"]
"mysql": ["database", "network"]
"notice": ["intrusion_detection", "network"]
"ntlm": ["authentication", "iam", "network"]
"ntp": ["network"]
"openvpn": ["network"]
"pe": ["file"]
"profinet": ["ot", "network"]
"profinet_dce_rpc": ["ot", "network"]
"radius": ["authentication", "iam", "network"]
"rdp": ["network"]
"rfb": ["network"]
"s7comm": ["ot", "network"]
"signatures": ["malware", "intrusion_detection", "network"]
"sip": ["network"]
"smb_cmd": ["network"]
"smb_files": ["file", "network"]
"smb_mapping": ["file", "network"]
"smtp": ["network"]
"snmp": ["network"]
"socks": ["network"]
"software": ["network"]
"ssh": ["authentication", "network"]
"ssl": ["network"]
"syslog": ["network"]
"tds": ["database", "network"]
"tds_rpc": ["database", "network"]
"tds_sql_batch": ["database", "network"]
"tunnel": ["network"]
"weird": ["intrusion_detection", "network"]
"wireguard": ["network"]
"x509": ["file"]
@@ -0,0 +1,6 @@
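# receive events handed off from the upstream pipeline via the
# pipeline-to-pipeline address "log-enrichment"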
input {
  pipeline {
    address => "log-enrichment"
  }
}
@@ -0,0 +1,444 @@
filter {

  # todo: make added fields not zeek-specific? (see ECS topic branch)
  # all the lookups are done here, but some of them are still zeek-specific which
  # IMO isn't the cleanest. could be refactored/split.

  ######## MAC address OUI (manufacturer) lookup #################################################

  # if OUI lookup is requested, enable it
  mutate {
    id => "mutate_add_field_env_logstash_oui_lookup"
    add_field => { "[@metadata][ENV_LOGSTASH_OUI_LOOKUP]" => "${LOGSTASH_OUI_LOOKUP:false}" }
  }
  if ([@metadata][ENV_LOGSTASH_OUI_LOOKUP] == "true") {

    # srcMac/dstMac are arrays at this point, as Arkime expects them to be

    if ([srcMac]) and ([srcMac][0]) {

      # attempt lookup of srcMac oui
      ieee_oui {
        id => "ieee_oui_srcMac"
        source => "[srcMac][0]"
        target => "[zeek][orig_l2_oui]"
        ouifile => "/usr/share/logstash/config/oui-logstash.txt"
        refresh_interval => 0
      }

      if ([zeek][orig_l2_oui]) {

        # merge orig_l2_oui into srcOui array (with a count of 1)
        mutate { id => "mutate_merge_field_srcOui"
                 merge => { "[srcOui]" => "[zeek][orig_l2_oui]" } }
        mutate { id => "mutate_add_field_srcOuiCnt"
                 add_field => { "[srcOuiCnt]" => "1" } }

        # if this is a DHCP log type, copy srcOui to dhcp.oui
        if ([dhcp][mac]) {
          mutate {
            id => "mutate_add_fields_dhcp_oui"
            add_field => { "[dhcp][oui]" => "%{[srcOui]}" }
          }
          mutate {
            id => "mutate_add_fields_dhcp_ouiCnt"
            add_field => { "[dhcp][ouiCnt]" => "%{[srcOuiCnt]}" }
          }
        }
      }

    } # end if [srcMac]

    if ([dstMac]) and ([dstMac][0]) {

      # attempt lookup of dstMac oui
      ieee_oui {
        id => "ieee_oui_dstMac"
        source => "[dstMac][0]"
        target => "[zeek][resp_l2_oui]"
        ouifile => "/usr/share/logstash/config/oui-logstash.txt"
        refresh_interval => 0
      }

      if ([zeek][resp_l2_oui]) {

        # merge resp_l2_oui into dstOui array (with a count of 1)
        mutate { id => "mutate_merge_field_dstOui"
                 merge => { "[dstOui]" => "[zeek][resp_l2_oui]" } }
        mutate { id => "mutate_add_field_dstOuiCnt"
                 add_field => { "[dstOuiCnt]" => "1" } }
      }

    } # end if [dstMac]

  } # end if ENV_LOGSTASH_OUI_LOOKUP
  ################################################################################################

  ######## IP address class tagging, GeoIP/ASN lookups, and reverse DNS ###########################
  if ([srcIp]) {
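    # tag events whose source address falls in a private, link-local, multicast,
    # or other special-purpose range (RFC 1918 and similar) as internal_source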
    cidr {
      id => "cidr_add_tag_internal_source"
      add_tag => [ "internal_source" ]
      address => [ "%{srcIp}" ]
      network => [ "0.0.0.0/8", "10.0.0.0/8", "100.64.0.0/10", "127.0.0.0/8", "169.254.0.0/16", "172.16.0.0/12", "192.0.0.0/24", "192.0.2.0/24",
                   "192.88.99.0/24", "192.168.0.0/16", "198.18.0.0/15", "198.51.100.0/24", "203.0.113.0/24", "224.0.0.0/4", "240.0.0.0/4",
                   "255.255.255.255/32", "::/0", "::/128", "::1/128", "fc00::/7", "fe80::/10", "ff00::/8" ]
    }
    if (!("internal_source" in [tags])) {
      mutate { id => "mutate_add_tag_external_source"
               add_tag => [ "external_source" ] }
      # map srcIp to GEO countries
      geoip {
        id => "geoip_srcIp_geo"
        source => "[srcIp]"
        target => "[zeek][source_geo]"
      }
      geoip {
        id => "geoip_srcIp_asn"
        default_database_type => "ASN"
        source => "[srcIp]"
        target => "[@metadata][orig_asn]"
      }
      if ([zeek][source_geo] and [zeek][source_geo][country_code2]) {
        mutate { id => "mutate_add_field_srcGEO"
                 add_field => { "[srcGEO]" => "%{[zeek][source_geo][country_code2]}" } }
      }
      if ([@metadata][orig_asn] and [@metadata][orig_asn][as_org] and [@metadata][orig_asn][asn]) {
        mutate { id => "mutate_add_field_srcASN"
                 add_field => { "[srcASN]" => "AS%{[@metadata][orig_asn][asn]} %{[@metadata][orig_asn][as_org]}" } }
      }

      # if reverse DNS for public IP addresses is enabled (via environment variable) do it
      mutate {
        add_field => { "[@metadata][ENV_LOGSTASH_REVERSE_DNS]" => "${LOGSTASH_REVERSE_DNS:false}" }
      }
      if ([@metadata][ENV_LOGSTASH_REVERSE_DNS] == "true") {
        mutate {
          id => "mutate_add_field_srcip_for_dns"
          add_field => {
            "[zeek][source_ip_reverse_dns]" => "%{[srcIp]}"
          }
        }
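        # reverse-resolve the address in place; successful lookups are cached for
        # 300 seconds and failures for 60, so repeated events for the same address
        # don't hammer the resolver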
        dns {
          id => "dns_source_ip_reverse_dns"
          reverse => [ "[zeek][source_ip_reverse_dns]" ]
          action => "replace"
          hit_cache_size => 8000
          hit_cache_ttl => 300
          failed_cache_size => 8000
          failed_cache_ttl => 60
        }
        if ([srcIp] == [zeek][source_ip_reverse_dns]) {
          mutate {
            id => "mutate_remove_field_source_ip_reverse_dns"
            remove_field => [ "[zeek][source_ip_reverse_dns]" ]
          }
        }
      }
    }
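    # determine [network][type]: an address that matches 0.0.0.0/0 parsed as IPv4;
    # anything that doesn't is assumed to be IPv6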
    cidr {
      id => "cidr_detect_network_type_ipv4_source"
      add_field => { "[network][type]" => "ipv4" }
      address => [ "%{srcIp}" ]
      network => [ "0.0.0.0/0" ]
    }
    if (![network][type]) {
      mutate { id => "mutate_add_network_type_ipv4_source"
               add_field => { "[network][type]" => "ipv6" } }
    }
  } # if ([srcIp])

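  # repeat the same internal/external tagging, GeoIP/ASN lookup, and optional
  # reverse DNS for the destination address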
  if ([dstIp]) {
    cidr {
      id => "cidr_add_tag_internal_destination"
      add_tag => [ "internal_destination" ]
      address => [ "%{dstIp}" ]
      network => [ "0.0.0.0/8", "10.0.0.0/8", "100.64.0.0/10", "127.0.0.0/8", "169.254.0.0/16", "172.16.0.0/12", "192.0.0.0/24", "192.0.2.0/24",
                   "192.88.99.0/24", "192.168.0.0/16", "198.18.0.0/15", "198.51.100.0/24", "203.0.113.0/24", "224.0.0.0/4", "240.0.0.0/4",
                   "255.255.255.255/32", "::/0", "::/128", "::1/128", "fc00::/7", "fe80::/10", "ff00::/8" ]
    }
    if (!("internal_destination" in [tags])) {
      mutate { id => "mutate_add_tag_external_destination"
               add_tag => [ "external_destination" ] }
      # map dstIp to GEO countries
      geoip {
        id => "geoip_dstIp_geo"
        source => "[dstIp]"
        target => "[zeek][destination_geo]"
      }
      geoip {
        id => "geoip_dstIp_asn"
        default_database_type => "ASN"
        source => "[dstIp]"
        target => "[@metadata][resp_asn]"
      }
      if ([zeek][destination_geo] and [zeek][destination_geo][country_code2]) {
        mutate { id => "mutate_add_field_dstGEO"
                 add_field => { "[dstGEO]" => "%{[zeek][destination_geo][country_code2]}" } }
      }
      if ([@metadata][resp_asn] and [@metadata][resp_asn][as_org] and [@metadata][resp_asn][asn]) {
        mutate { id => "mutate_add_field_dstASN"
                 add_field => { "[dstASN]" => "AS%{[@metadata][resp_asn][asn]} %{[@metadata][resp_asn][as_org]}" } }
      }

      # if reverse DNS for public IP addresses is enabled (via environment variable) do it
      if (![@metadata][ENV_LOGSTASH_REVERSE_DNS]) {
        mutate {
          add_field => { "[@metadata][ENV_LOGSTASH_REVERSE_DNS]" => "${LOGSTASH_REVERSE_DNS:false}" }
        }
      }
      if ([@metadata][ENV_LOGSTASH_REVERSE_DNS] == "true") {
        mutate {
          id => "mutate_add_field_dstip_for_dns"
          add_field => {
            "[zeek][destination_ip_reverse_dns]" => "%{[dstIp]}"
          }
        }
        dns {
          id => "dns_destination_ip_reverse_dns"
          reverse => [ "[zeek][destination_ip_reverse_dns]" ]
          action => "replace"
          hit_cache_size => 8000
          hit_cache_ttl => 300
          failed_cache_size => 8000
          failed_cache_ttl => 60
        }
        if ([dstIp] == [zeek][destination_ip_reverse_dns]) {
          mutate {
            id => "mutate_remove_field_destination_ip_reverse_dns"
            remove_field => [ "[zeek][destination_ip_reverse_dns]" ]
          }
        }
      }
    }
    if (![network][type]) {
      cidr {
        id => "cidr_detect_network_type_ipv4_dest"
        add_field => { "[network][type]" => "ipv4" }
        address => [ "%{dstIp}" ]
        network => [ "0.0.0.0/0" ]
      }
      if (![network][type]) {
        mutate { id => "mutate_add_network_type_ipv4_dest"
                 add_field => { "[network][type]" => "ipv6" } }
      }
    }
  } # if ([dstIp])

  if ([dns][host]) {
    # if requested, look up DNS queries using freq_server.py to get entropy scores

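    # skip very short names and reverse-lookup / I2P suffixes, which would skew the scores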
    ruby {
      id => "ruby_dns_freq_lookup"
      init => "
        require 'net/http'
        require 'cgi'
        $freqLookupEnabled = ENV['FREQ_LOOKUP'] || 'false'
      "
      # freq_server.py returns a string like: (2.9333, 3.6353)
      code => "
        if ($freqLookupEnabled == 'true') then
          scoresv1 = Array.new
          scoresv2 = Array.new
          scoresTmp = Array.new
          begin
            event.get('[dns][host]').each { |query|
              if (query.length >= 4) and (query !~ /(ip6\.int|ip6\.arpa|in-addr\.arpa|b32\.i2p)$/i) then
                scoresTmp.clear
                scoresTmp.concat(Net::HTTP.get_response(URI.parse('http://freq:10004/measure/' + CGI.escape(query))).body.gsub(/(^\(|\)$|\s+)/, '').split(',').map(&:to_f))
                if (scoresTmp.length == 2) then
                  scoresv1 << scoresTmp[0]
                  scoresv2 << scoresTmp[1]
                end
              end
            }
          rescue Exception => e
            event.set('ruby_exception', 'ruby_dns_freq_lookup: ' + e.message)
          end
          event.set('[zeek][freq_score_v1]', scoresv1) unless (scoresv1.length == 0)
          event.set('[zeek][freq_score_v2]', scoresv2) unless (scoresv2.length == 0)
        end"
    }
  } # end if dns.host

if ([dns][ip]) and ([dns][ip][0]) {
|
||||
# if this is a DNS record with an IP, GeoIP it as well
|
||||
geoip {
|
||||
id => "geoip_dns_ip_asn"
|
||||
default_database_type => "ASN"
|
||||
source => "[dns][ip][0]"
|
||||
target => "[@metadata][dns_asn]"
|
||||
}
|
||||
if ([@metadata][dns_asn] and [@metadata][dns_asn][as_org] and [@metadata][dns_asn][asn]) {
|
||||
# this is stupid, the %{} doesn't seem to be liked by mutate.merge
|
||||
mutate { id => "mutate_add_field_dns_asn"
|
||||
add_field => { "[@metadata][asn_str]" => "AS%{[@metadata][dns_asn][asn]} %{[@metadata][dns_asn][as_org]}" } }
|
||||
mutate { id => "mutate_merge_dns_asn"
|
||||
merge => { "[dns][ASN]" => "[@metadata][asn_str]" } }
|
||||
}
|
||||
|
||||
geoip {
|
||||
id => "geoip_dns_ip_geo"
|
||||
source => "[dns][ip][0]"
|
||||
target => "[@metadata][dns_geo]"
|
||||
}
|
||||
if ([@metadata][dns_geo] and [@metadata][dns_geo][country_code2]) {
|
||||
mutate { id => "mutate_merge_dns_geo"
|
||||
merge => { "[dns][GEO]" => "[@metadata][dns_geo][country_code2]" } }
|
||||
}
|
||||
} # end if dns.ip
|
||||
|
||||
if ([radius]) {

# if this is a Radius record with IP addresses, GeoIP them as well
if ([radius][framedIp]) and ([radius][framedIp][0]) {
geoip {
id => "geoip_radius_framedIp_asn"
default_database_type => "ASN"
source => "[radius][framedIp][0]"
target => "[@metadata][radius_asn]"
}
if ([@metadata][radius_asn] and [@metadata][radius_asn][as_org] and [@metadata][radius_asn][asn]) {
# mutate's merge does not sprintf-expand %{} references, so build the string with add_field first
mutate { id => "mutate_add_field_radius_asn"
add_field => { "[@metadata][asn_str]" => "AS%{[@metadata][radius_asn][asn]} %{[@metadata][radius_asn][as_org]}" } }
mutate { id => "mutate_merge_radius_asn"
merge => { "[radius][framedASN]" => "[@metadata][asn_str]" } }
}
geoip {
id => "geoip_radius_framedIp_geo"
source => "[radius][framedIp][0]"
target => "[@metadata][radius_geo]"
}
if ([@metadata][radius_geo] and [@metadata][radius_geo][country_code2]) {
mutate { id => "mutate_merge_radius_geo"
merge => { "[radius][framedGEO]" => "[@metadata][radius_geo][country_code2]" } }
}
} # end if radius.framedIp

if ([radius][endpointIp]) and ([radius][endpointIp][0]) {
geoip {
id => "geoip_radius_endpointIp_asn"
default_database_type => "ASN"
source => "[radius][endpointIp][0]"
target => "[@metadata][radius_asn]"
}
if ([@metadata][radius_asn] and [@metadata][radius_asn][as_org] and [@metadata][radius_asn][asn]) {
# mutate's merge does not sprintf-expand %{} references, so build the string with add_field first
mutate { id => "mutate_add_field_radius_endpoint_asn"
add_field => { "[@metadata][asn_str]" => "AS%{[@metadata][radius_asn][asn]} %{[@metadata][radius_asn][as_org]}" } }
mutate { id => "mutate_merge_radius_endpoint_asn"
merge => { "[radius][endpointASN]" => "[@metadata][asn_str]" } }
}

geoip {
id => "geoip_radius_endpointIp_geo"
source => "[radius][endpointIp][0]"
target => "[@metadata][radius_geo]"
}
if ([@metadata][radius_geo] and [@metadata][radius_geo][country_code2]) {
mutate { id => "mutate_merge_radius_endpoint_geo"
merge => { "[radius][endpointGEO]" => "[@metadata][radius_geo][country_code2]" } }
}
} # end if radius.endpointIp

} # end if radius

if ([zeek_cip_identity][socket_address]) {

# if this is a zeek_cip_identity record with socket_address, ASN/GeoIP it as well
geoip {
id => "geoip_zeek_cip_identity_socket_address"
default_database_type => "ASN"
source => "[zeek_cip_identity][socket_address]"
target => "[@metadata][zeek_cip_identity_asn]"
}
if ([@metadata][zeek_cip_identity_asn] and [@metadata][zeek_cip_identity_asn][as_org] and [@metadata][zeek_cip_identity_asn][asn]) {
# mutate's merge does not sprintf-expand %{} references, so build the string with add_field first
mutate { id => "mutate_add_field_zeek_cip_identity_asn"
add_field => { "[@metadata][cip_asn_str]" => "AS%{[@metadata][zeek_cip_identity_asn][asn]} %{[@metadata][zeek_cip_identity_asn][as_org]}" } }
mutate { id => "mutate_merge_zeek_cip_identity_asn"
merge => { "[zeek_cip_identity][socket_address_asn]" => "[@metadata][cip_asn_str]" } }
}
geoip {
id => "geoip_zeek_cip_identity_socket_address_geo"
source => "[zeek_cip_identity][socket_address]"
target => "[zeek_cip_identity][socket_address_geo]"
}

} # end if zeek_cip_identity.socket_address

if ([zeek_ssl][server_name]) {
mutate {
add_field => { "[@metadata][ENV_FREQ_LOOKUP]" => "${FREQ_LOOKUP:false}" }
}
if ([@metadata][ENV_FREQ_LOOKUP] == "true") {
# if requested, look up zeek_ssl.server_name queries using freq_server.py to get entropy scores
http {
id => "rest_zeek_ssl_server_name_freq_lookup"
url => "http://freq:10004/measure/%{[zeek_ssl][server_name]}"
target_body => "[@metadata][zeek_ssl_server_name_freq]"
}
if ([@metadata][zeek_ssl_server_name_freq]) {
grok {
id => "grok_zeek_ssl_server_name_freq_parse"
match => { "[@metadata][zeek_ssl_server_name_freq]" => [ "^\(%{NUMBER:[zeek][freq_score_v1]}, %{NUMBER:[zeek][freq_score_v2]}\)$" ] }
}
}
}
} # end if zeek_ssl.server_name

################################################################################################

######## JA3 community hashes lookup ###########################################################
# ja3/ja3s are arrays at this point, as Arkime expects them to be

if ([tls][ja3]) and ([tls][ja3][0]) {
translate {
id => "translate_ssl_ja3"
field => "[tls][ja3][0]"
destination => "[zeek_ssl][ja3_desc]"
dictionary_path => "/etc/ja3.yaml"
}
}

if ([tls][ja3s]) and ([tls][ja3s][0]) {
translate {
id => "translate_ssl_ja3s"
field => "[tls][ja3s][0]"
destination => "[zeek_ssl][ja3s_desc]"
dictionary_path => "/etc/ja3.yaml"
}
}

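# The ja3.yaml file consumed by the two translate filters above is a plain YAML
# dictionary keyed by JA3/JA3S hash; a sketch of the expected shape (the hash
# and description below are placeholders, not real community entries):
#   "0123456789abcdef0123456789abcdef": "Example TLS Client 1.0 (placeholder)"
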
################################################################################################

# this identifies which node the log came from
if ([host][name]) {
mutate { id => "mutate_add_field_host_name_node"
add_field => { "[node]" => "%{[host][name]}" } }
} else {
mutate { id => "mutate_add_field_logstash_node"
add_field => { "[node]" => "logs" } }
}

if ([log][file][path]) {
# trim path portion of log.file.path
mutate { id => "mutate_gsub_field_zeek_log_file_path"
gsub => [ "[log][file][path]", "^.*/", "" ] }
}

# remove some useless beats-related fields
mutate {
id => "mutate_remove_field_beats_useless"
remove_field => [
"[beat]",
"[agent][ephemeral_id]",
"[log][offset]",
"[input][type]",
"[prospector]",
"[message]"
]
}

} # end Filter
@@ -0,0 +1,79 @@
filter {

# set data types for fields that belong to multiple types of logs
# _dataconversion tag (for missing fields) will be removed in 18_tags_finalize.conf
mutate {
id => "mutate_convert_misc"
convert => {
"[certCnt]" => "integer"
"[dhcp][hostCnt]" => "integer"
"[dhcp][idCnt]" => "integer"
"[dhcp][macCnt]" => "integer"
"[dhcp][ouiCnt]" => "integer"
"[dns][hostCnt]" => "integer"
"[dns][ipCnt]" => "integer"
"[dns][opcodeCnt]" => "integer"
"[dns][qcCnt]" => "integer"
"[dns][qtCnt]" => "integer"
"[dstBytes]" => "integer"
"[dstDataBytes]" => "integer"
"[dstMacCnt]" => "integer"
"[dstOuiCnt]" => "integer"
"[dstPackets]" => "integer"
"[dstPort]" => "integer"
"[email][dstCnt]" => "integer"
"[email][idCnt]" => "integer"
"[email][srcCnt]" => "integer"
"[email][subjectCnt]" => "integer"
"[email][useragentCnt]" => "integer"
"[email][smtpHelloCnt]" => "integer"
"[firstPacket]" => "integer"
"[http][bodyMagicCnt]" => "integer"
"[http][clientVersionCnt]" => "integer"
"[http][hostCnt]" => "integer"
"[http][methodCnt]" => "integer"
"[http][statuscodeCnt]" => "integer"
"[http][uriCnt]" => "integer"
"[http][useragentCnt]" => "integer"
"[ipProtocol]" => "integer"
"[irc][channelCnt]" => "integer"
"[irc][nickCnt]" => "integer"
"[krb5][cnameCnt]" => "integer"
"[krb5][snameCnt]" => "integer"
"[lastPacket]" => "integer"
"[length]" => "integer"
"[protocolCnt]" => "integer"
"[quic][hostCnt]" => "integer"
"[quic][useragentCnt]" => "integer"
"[quic][versionCnt]" => "integer"
"[radius][endpointIpCnt]" => "integer"
"[radius][framedIpCnt]" => "integer"
"[radius][macCnt]" => "integer"
"[segmentCnt]" => "integer"
"[srcBytes]" => "integer"
"[srcDataBytes]" => "integer"
"[srcMacCnt]" => "integer"
"[srcOuiCnt]" => "integer"
"[srcPackets]" => "integer"
"[srcPort]" => "integer"
"[ssh][hasshCnt]" => "integer"
"[ssh][hasshServerCnt]" => "integer"
"[ssh][keyCnt]" => "integer"
"[ssh][versionCnt]" => "integer"
"[timestamp]" => "integer"
"[tls][cipherCnt]" => "integer"
"[tls][ja3Cnt]" => "integer"
"[tls][ja3sCnt]" => "integer"
"[tls][versionCnt]" => "integer"
"[totBytes]" => "integer"
"[totDataBytes]" => "integer"
"[totPackets]" => "integer"
"[userCnt]" => "integer"
"[vlan]" => "integer"
"[vlanCnt]" => "integer"
"[zeek][freq_score_v1]" => "float"
"[zeek][freq_score_v2]" => "float"
}
}

} # end Filter
@@ -0,0 +1,12 @@
filter {

if ([zeek][resp_segment] and [zeek][orig_segment]) and
([zeek][resp_segment] != [zeek][orig_segment]) {
mutate {
id => "mutate_add_tag_cross_segment"
add_tag => [ "cross_segment" ]
}
}

} # filter

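# A quick illustration of the intent (segment names are hypothetical): an event
# carrying [zeek][orig_segment] = "ot-floor1" and [zeek][resp_segment] = "corp-it"
# satisfies both clauses above and is tagged "cross_segment"; same-segment or
# unmapped events pass through untouched.
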
@@ -0,0 +1,33 @@
filter {

# remove tags we'd rather not see

mutate { id => "mutate_enrichment_tags_remove"
remove_tag => [ "beats_input_codec_plain_applied",
"_dateparsefailure",
"_grokparsefailure",
"_jsonparsefailure",
"_dissectfailure",
"_ouilookupfailure",
"_geoip_lookup_failure" ] }

# deduplicate tags

ruby {
id => "ruby_zeek_tags_deduplicate"
code => "event.set('[tags]', event.get('[tags]').uniq)"
}

# count tags (for moloch)

ruby {
id => "ruby_enrichment_tagsCnt"
code => "event.set('[tagsCnt]', event.get('[tags]').length)"
}
mutate {
id => "mutate_convert_enrichment_tagsCnt"
convert => { "[tagsCnt]" => "integer" }
}

} # filter

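# Both ruby filters above assume [tags] is always present; an event without it
# would make .uniq raise a NoMethodError on nil. A defensive variant (a sketch,
# not the shipped code):
#   tags = (event.get('[tags]') || []).uniq
#   event.set('[tags]', tags)
#   event.set('[tagsCnt]', tags.length)
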
@@ -0,0 +1,69 @@
filter {

# Map enriched fields to ECS where possible (see https://github.com/idaholab/Malcolm/issues/16)
# For now I will add fields rather than rename them. This will preserve backwards compatibility
# but the records will be somewhat bigger. I'll have to address what (if anything) to do with upgrades.

# for now, don't do anything unless an environment variable explicitly enables it
mutate {
id => "mutate_add_field_env_logstash_enriched_to_ecs"
add_field => { "[@metadata][ENV_LOGSTASH_ENRICHED_TO_ECS]" => "${LOGSTASH_TO_ECS:false}" }
}
if ([@metadata][ENV_LOGSTASH_ENRICHED_TO_ECS] == "true") {

# 🗹 Network - Fields describing the communication path over which the event happened. - https://www.elastic.co/guide/en/ecs/current/ecs-network.html

# network.direction (from tags assigned during 11_lookups.conf)
if ("internal_source" in [tags]) and ("internal_destination" in [tags]) {
mutate { id => "mutate_add_field_metadata_network_direction_internal"
add_field => { "[@metadata][network_direction]" => "internal" } }
} else if ("external_source" in [tags]) and ("external_destination" in [tags]) {
mutate { id => "mutate_add_field_metadata_network_direction_external"
add_field => { "[@metadata][network_direction]" => "external" } }
} else if ("internal_source" in [tags]) and ("external_destination" in [tags]) {
mutate { id => "mutate_add_field_metadata_network_direction_outbound"
add_field => { "[@metadata][network_direction]" => "outbound" } }
} else if ("external_source" in [tags]) and ("internal_destination" in [tags]) {
mutate { id => "mutate_add_field_metadata_network_direction_inbound"
add_field => { "[@metadata][network_direction]" => "inbound" } }
}
if ([@metadata][network_direction]) {
mutate { id => "mutate_add_field_ecs_network_direction"
add_field => { "[network][direction]" => "%{[@metadata][network_direction]}" } }
}

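# The four branches above implement a small truth table over the
# internal/external source and destination tags; the same mapping as a
# plain-Ruby sketch (tag names are the ones already used above):
#   def network_direction(tags)
#     src_int = tags.include?('internal_source')
#     dst_int = tags.include?('internal_destination')
#     src_ext = tags.include?('external_source')
#     dst_ext = tags.include?('external_destination')
#     return 'internal' if src_int && dst_int
#     return 'external' if src_ext && dst_ext
#     return 'outbound' if src_int && dst_ext
#     return 'inbound'  if src_ext && dst_int
#     nil # direction left unset when tags are missing or mixed
#   end
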
# network.name (based on info from [zeek][resp_segment] and [zeek][orig_segment])
if ([zeek][resp_segment]) { mutate { id => "mutate_add_field_ecs_network_name_resp"
merge => { "[network][name]" => "[zeek][resp_segment]" } } }
if ([zeek][orig_segment]) { mutate { id => "mutate_add_field_ecs_network_name_orig"
merge => { "[network][name]" => "[zeek][orig_segment]" } } }

# Autonomous System and Geo are handled after enrichment in 20_enriched_to_ecs.conf
# ☐ Autonomous System - Fields describing an Autonomous System (Internet routing prefix). - https://www.elastic.co/guide/en/ecs/current/ecs-as.html
# ☐ Geo - Fields describing a location. - https://www.elastic.co/guide/en/ecs/current/ecs-geo.html

# ecs.version is required in all events - https://www.elastic.co/guide/en/ecs/current/ecs-ecs.html
if (![ecs][version]) { mutate { id => "mutate_add_field_ecs_version"
add_field => { "[ecs][version]" => "1.5.0" } } }

# event.ingested
if (![event][ingested]) {
ruby {
id => "ruby_event_ingested_now_zeek"
init => "require 'time'"
code => "event.set('[event][ingested]', Time.now.to_f)"
}
date {
id => "date_event_ingested_conv"
match => [ "[event][ingested]", "UNIX" ]
target => "[event][ingested]"
}
}

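# The ruby filter writes fractional epoch seconds and the date filter's UNIX
# pattern parses exactly that into a proper timestamp; a plain-Ruby sketch of
# the round trip (the epoch value shown is illustrative):
#   require 'time'
#   epoch = Time.now.to_f         # e.g., 1628174262.731
#   Time.at(epoch).utc.iso8601(3) # => "2021-08-05T14:37:42.731Z" (example)
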
# event.provider
if (![event][provider]) { mutate { id => "mutate_add_field_event_provider_enrichment"
add_field => { "[event][provider]" => "malcolm" } } }

} # end if ENV_LOGSTASH_ENRICHED_TO_ECS

}
@@ -0,0 +1,5 @@
output {
pipeline {
send_to => [_MALCOLM_ELASTICSEARCH_OUTPUT_PIPELINES_]
}
}
3
Vagrant/resources/malcolm/logstash/pipelines/external/00_config.conf
vendored
Normal file
@@ -0,0 +1,3 @@
queue.type: persisted
queue.max_bytes: 4gb
path.queue: "/logstash-persistent-queue"
6
Vagrant/resources/malcolm/logstash/pipelines/external/01_input_external_es.conf
vendored
Normal file
@@ -0,0 +1,6 @@
input {
pipeline {
address => "${ELASTICSEARCH_PIPELINE_ADDRESS_EXTERNAL:external-es}"
}
}

13
Vagrant/resources/malcolm/logstash/pipelines/external/99_elastic_output.conf
vendored
Normal file
@@ -0,0 +1,13 @@
output {
elasticsearch {
id => "output_external_elasticsearch_moloch"
hosts => "${ES_EXTERNAL_HOSTS}"
ssl => "${ES_EXTERNAL_SSL:true}"
ssl_certificate_verification => "${ES_EXTERNAL_SSL_CERTIFICATE_VERIFICATION:false}"
user => "${ES_EXTERNAL_USER:}"
password => "${ES_EXTERNAL_PASSWORD:}"
manage_template => false
index => "sessions2-%{+YYMMdd}"
document_id => "%{+YYMMdd}-%{zeekLogDocId}"
}
}
@@ -0,0 +1,13 @@
input {
beats {
id => "input_beats"
host => "0.0.0.0"
port => 5044
ssl => "${BEATS_SSL:false}"
ssl_certificate_authorities => ["/certs/ca.crt"]
ssl_certificate => "/certs/server.crt"
ssl_key => "/certs/server.key"
ssl_verify_mode => "none"
}
}

@@ -0,0 +1,5 @@
output {
pipeline {
send_to => [_MALCOLM_PARSE_PIPELINE_ADDRESSES_]
}
}
@@ -0,0 +1,6 @@
input {
pipeline {
address => "${ELASTICSEARCH_PIPELINE_ADDRESS_INTERNAL:internal-es}"
}
}

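# These pipeline outputs and inputs pair up by address: a pipeline output's
# send_to list names the addresses that receiving pipelines declare in their
# pipeline input. A minimal sketch of the pairing, using the internal-es
# default from above:
#   output { pipeline { send_to => ["internal-es"] } }   # sending pipeline
#   input  { pipeline { address => "internal-es" } }     # receiving pipeline
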
@@ -0,0 +1,9 @@
output {
elasticsearch {
id => "output_elasticsearch_moloch"
hosts => "${ES_HOSTS:elasticsearch:9200}"
manage_template => false
index => "sessions2-%{+YYMMdd}"
document_id => "%{+YYMMdd}-%{zeekLogDocId}"
}
}
@@ -0,0 +1,6 @@
input {
pipeline {
address => "zeek-parse"
}
}

5050
Vagrant/resources/malcolm/logstash/pipelines/zeek/11_zeek_logs.conf
Normal file
File diff suppressed because it is too large
@@ -0,0 +1,967 @@
filter {

# Protocol/service version ##########################################################################################
# collect protocol version under the parent zeek.service_version array

if ([zeek_gquic][version]) { mutate { id => "mutate_merge_normalize_zeek_gquic_version"
merge => { "[zeek][service_version]" => "[zeek_gquic][version]" } } }

if ([zeek_http][version]) { mutate { id => "mutate_merge_normalize_zeek_http_version"
merge => { "[zeek][service_version]" => "[zeek_http][version]" } } }

if ([zeek_ipsec]) {
ruby {
id => "ruby_zeek_field_zeek_service_version_ipsec"
code => "
versions = Array.new
versions << [event.get('[zeek_ipsec][maj_ver]'),
event.get('[zeek_ipsec][min_ver]')].compact.join('.')
event.set('[zeek][service_version]', versions)"
}
}

if ([zeek_ldap][version]) { mutate { id => "mutate_merge_normalize_zeek_ldap_version"
merge => { "[zeek][service_version]" => "[zeek_ldap][version]" } } }

if ([zeek_ntp][version]) { mutate { id => "mutate_merge_normalize_zeek_ntp_version"
merge => { "[zeek][service_version]" => "[zeek_ntp][version]" } } }

if ([zeek_profinet][block_version]) { mutate { id => "mutate_merge_normalize_zeek_profinet_block_version"
merge => { "[zeek][service_version]" => "[zeek_profinet][block_version]" } } }

if ([zeek_profinet_dce_rpc][version]) { mutate { id => "mutate_merge_normalize_zeek_profinet_dce_rpc_version"
merge => { "[zeek][service_version]" => "[zeek_profinet_dce_rpc][version]" } } }

if ([zeek_rfb]) {
ruby {
id => "ruby_zeek_field_zeek_service_version_rfb"
code => '
versions = Array.new
# strip leading zeroes from the RFB version components (e.g., 003.008 -> 3.8)
clientMajorVersion = event.get("[zeek_rfb][client_major_version]").sub!(/^0*/, "")
clientMinorVersion = event.get("[zeek_rfb][client_minor_version]").sub!(/^0*/, "")
serverMajorVersion = event.get("[zeek_rfb][server_major_version]").sub!(/^0*/, "")
serverMinorVersion = event.get("[zeek_rfb][server_minor_version]").sub!(/^0*/, "")
if clientMajorVersion then
versions << [clientMajorVersion, clientMinorVersion].join(".")
end
if serverMajorVersion then
versions << [serverMajorVersion, serverMinorVersion].join(".")
end
event.set("[zeek][service_version]", versions.uniq)'
}
}

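# Note that the sub! calls above assume all four version fields exist on the
# event; if zeek omits one, event.get returns nil and the filter raises. A
# nil-tolerant helper (a sketch, not the shipped code):
#   def trimmed_version(event, field)
#     v = event.get(field)
#     v && v.sub(/^0*/, '')
#   end
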
if ([zeek_rdp][client_build]) { mutate { id => "mutate_merge_normalize_zeek_rdp_client_build"
merge => { "[zeek][service_version]" => "[zeek_rdp][client_build]" } } }

if ([zeek_smtp][version]) { mutate { id => "mutate_merge_normalize_zeek_smtp_version"
merge => { "[zeek][service_version]" => "[zeek_smtp][version]" } } }

if ([zeek_smb_cmd][version]) { mutate { id => "mutate_merge_normalize_zeek_smb_cmd_version"
merge => { "[zeek][service_version]" => "[zeek_smb_cmd][version]" } } }

if ([zeek_snmp][version]) { mutate { id => "mutate_merge_normalize_zeek_snmp_version"
merge => { "[zeek][service_version]" => "[zeek_snmp][version]" } } }

if ([zeek_socks][version]) { mutate { id => "mutate_merge_normalize_zeek_socks_version"
merge => { "[zeek][service_version]" => "[zeek_socks][version]" } } }

if ([zeek_ssh][version]) { mutate { id => "mutate_merge_normalize_zeek_ssh_version"
merge => { "[zeek][service_version]" => "[zeek_ssh][version]" } } }

if ([zeek_ssl][ssl_version]) { mutate { id => "mutate_merge_normalize_zeek_ssl_ssl_version"
merge => { "[zeek][service_version]" => "[zeek_ssl][ssl_version]" } } }

if ([zeek][service_version]) {
ruby {
id => "ruby_zeek_service_version_uniq"
code => "event.set('[zeek][service_version]', event.get('[zeek][service_version]').uniq)"
}
}

# Action ############################################################################################################
# collect all actions/operations/commands under the parent [zeek][action] array

if ([zeek_bacnet][pdu_service]) { mutate { id => "mutate_merge_normalize_zeek_bacnet_pdu_service"
merge => { "[zeek][action]" => "[zeek_bacnet][pdu_service]" } } }

if ([zeek_bacnet_discovery][pdu_service]) { mutate { id => "mutate_merge_normalize_zeek_bacnet_discovery_pdu_service"
merge => { "[zeek][action]" => "[zeek_bacnet_discovery][pdu_service]" } } }

if ([zeek_bacnet_property][pdu_service]) { mutate { id => "mutate_merge_normalize_zeek_bacnet_property_pdu_service"
merge => { "[zeek][action]" => "[zeek_bacnet_property][pdu_service]" } } }

if ([zeek_bsap_ip_rdb]) {
ruby {
# action = zeek_bsap_ip_rdb.app_func_code:zeek_bsap_ip_rdb.func_code
id => "ruby_zeek_bsap_ip_rdb_generate_action"
code => "
actions = Array.new unless (actions = event.get('[zeek][action]'))
actions.append([event.get('[zeek_bsap_ip_rdb][app_func_code]'),
event.get('[zeek_bsap_ip_rdb][func_code]')].compact.join(':'))
event.set('[zeek][action]', actions)"
}
}

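# The first line of that ruby code is a compact fetch-or-initialize: the inner
# assignment grabs any existing [zeek][action] array, and Array.new only runs
# when the fetch returned nil. An equivalent, more conventional spelling (sketch):
#   actions = event.get('[zeek][action]') || []
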
if ([zeek_bsap_serial_header][sfun]) { mutate { id => "mutate_merge_normalize_zeek_bsap_serial_header_sfun"
merge => { "[zeek][action]" => "[zeek_bsap_serial_header][sfun]" } } }

if ([zeek_bsap_serial_header][dfun]) { mutate { id => "mutate_merge_normalize_zeek_bsap_serial_header_dfun"
merge => { "[zeek][action]" => "[zeek_bsap_serial_header][dfun]" } } }

if ([zeek_bsap_serial_rdb][func_code]) { mutate { id => "mutate_merge_normalize_zeek_bsap_serial_rdb_func_code"
merge => { "[zeek][action]" => "[zeek_bsap_serial_rdb][func_code]" } } }

if ([zeek_bsap_serial_rdb_ext][sfun]) { mutate { id => "mutate_merge_normalize_zeek_bsap_serial_rdb_ext_sfun"
merge => { "[zeek][action]" => "[zeek_bsap_serial_rdb_ext][sfun]" } } }

if ([zeek_bsap_serial_rdb_ext][dfun]) { mutate { id => "mutate_merge_normalize_zeek_bsap_serial_rdb_ext_dfun"
merge => { "[zeek][action]" => "[zeek_bsap_serial_rdb_ext][dfun]" } } }

if ([zeek_bsap_serial_rdb_ext][extfun]) { mutate { id => "mutate_merge_normalize_zeek_bsap_serial_rdb_ext_extfun"
merge => { "[zeek][action]" => "[zeek_bsap_serial_rdb_ext][extfun]" } } }

if ([zeek_cip][cip_service]) { mutate { id => "mutate_merge_normalize_zeek_cip_cip_service"
merge => { "[zeek][action]" => "[zeek_cip][cip_service]" } } }

if ([zeek_dce_rpc][operation]) { mutate { id => "mutate_merge_normalize_zeek_dce_rpc_operation"
merge => { "[zeek][action]" => "[zeek_dce_rpc][operation]" } } }

if ([zeek_dhcp][msg_types]) { mutate { id => "mutate_merge_normalize_zeek_dhcp_msg_types"
merge => { "[zeek][action]" => "[zeek_dhcp][msg_types]" } } }

if ([zeek_dnp3][fc_request]) { mutate { id => "mutate_merge_normalize_zeek_dnp3_fc_request"
merge => { "[zeek][action]" => "[zeek_dnp3][fc_request]" } } }

if ([zeek_dnp3_control]) {
ruby {
# action = function_code:operation_type:trip_control_code
id => "ruby_zeek_dnp3_control_generate_action"
code => "
actions = Array.new unless (actions = event.get('[zeek][action]'))
actions.append([event.get('[zeek_dnp3_control][function_code]'),
event.get('[zeek_dnp3_control][operation_type]'),
event.get('[zeek_dnp3_control][trip_control_code]')].compact.join(':'))
event.set('[zeek][action]', actions)"
}
}

if ([zeek_dnp3_read_objects][function_code]) { mutate { id => "mutate_merge_normalize_zeek_dnp3_read_objects"
merge => { "[zeek][action]" => "[zeek_dnp3_read_objects][function_code]" } } }

if ([zeek_dns]) {
# action: query class and type
if ([zeek_dns][qclass_name]) and ([zeek_dns][qtype_name]) {
mutate { id => "mutate_add_field_metadata_dns_class_and_type"
add_field => { "[@metadata][dns_action]" => "%{[zeek_dns][qclass_name]} %{[zeek_dns][qtype_name]}" } }
} else if ([zeek_dns][qclass_name]) {
mutate { id => "mutate_add_field_metadata_dns_class"
add_field => { "[@metadata][dns_action]" => "%{[zeek_dns][qclass_name]}" } }
} else if ([zeek_dns][qtype_name]) {
mutate { id => "mutate_add_field_metadata_dns_type"
add_field => { "[@metadata][dns_action]" => "%{[zeek_dns][qtype_name]}" } }
} else {
mutate { id => "mutate_add_field_metadata_dns_query"
add_field => { "[@metadata][dns_action]" => "Query" } }
}
mutate { id => "mutate_merge_zeek_dns_action"
merge => { "[zeek][action]" => "[@metadata][dns_action]" } }
}

# TODO: convert zeek_ecat_foe_info.opcode and zeek_ecat_soe_info.opcode to operations
# zeek EtherCAT commands/operations
if ([zeek_ecat_aoe_info][command]) { mutate { id => "mutate_merge_normalize_zeek_ecat_aoe_info_command"
merge => { "[zeek][action]" => "[zeek_ecat_aoe_info][command]" } } }
if ([zeek_ecat_foe_info][opcode]) { mutate { id => "mutate_merge_normalize_zeek_ecat_foe_info_opcode"
merge => { "[zeek][action]" => "[zeek_ecat_foe_info][opcode]" } } }
if ([zeek_ecat_log_address][command]) { mutate { id => "mutate_merge_normalize_zeek_ecat_log_address"
merge => { "[zeek][action]" => "[zeek_ecat_log_address][command]" } } }
if ([zeek_ecat_registers][command]) { mutate { id => "mutate_merge_normalize_zeek_ecat_registers_command"
merge => { "[zeek][action]" => "[zeek_ecat_registers][command]" } } }
if ([zeek_ecat_soe_info][opcode]) { mutate { id => "mutate_merge_normalize_zeek_ecat_soe_info_opcode"
merge => { "[zeek][action]" => "[zeek_ecat_soe_info][opcode]" } } }
if ([zeek_ecat_arp_info][arp_type]) { mutate { id => "mutate_merge_normalize_zeek_ecat_arp_info_arp_type"
merge => { "[zeek][action]" => "[zeek_ecat_arp_info][arp_type]" } } }

if ([zeek_enip][enip_command]) { mutate { id => "mutate_merge_normalize_zeek_enip_enip_command"
merge => { "[zeek][action]" => "[zeek_enip][enip_command]" } } }

if ([zeek_ftp][command]) { mutate { id => "mutate_merge_normalize_zeek_ftp_command"
merge => { "[zeek][action]" => "[zeek_ftp][command]" } } }

if ([zeek_http]) {
if ([zeek_http][method]) {
mutate { id => "mutate_merge_normalize_zeek_http_method"
merge => { "[zeek][action]" => "[zeek_http][method]" } }
} else {
mutate { id => "mutate_add_field_zeek_http_request_action"
add_field => { "[@metadata][http_request_action]" => "Request" } }
mutate { id => "mutate_merge_field_zeek_http_request_action"
merge => { "[zeek][action]" => "[@metadata][http_request_action]" } }
}
}

if ([zeek_irc][command]) { mutate { id => "mutate_merge_normalize_zeek_irc_command"
merge => { "[zeek][action]" => "[zeek_irc][command]" } } }

if ([zeek_iso_cotp][pdu_type]) { mutate { id => "mutate_merge_normalize_zeek_iso_cotp_pdu_type"
merge => { "[zeek][action]" => "[zeek_iso_cotp][pdu_type]" } } }

if ([zeek_kerberos][request_type]) { mutate { id => "mutate_merge_normalize_zeek_kerberos_request_type"
merge => { "[zeek][action]" => "[zeek_kerberos][request_type]" } } }

if ([zeek_ldap][operation]) { mutate { id => "mutate_merge_normalize_zeek_ldap_operation"
merge => { "[zeek][action]" => "[zeek_ldap][operation]" } } }

if ([zeek_ldap_search]) {
if ([zeek_ldap_search][scope]) {
mutate { id => "mutate_add_field_zeek_ldap_search_scope_action"
add_field => { "[@metadata][zeek_ldap_search_action]" => "search %{[zeek_ldap_search][scope]}" } }
} else {
mutate { id => "mutate_add_field_zeek_ldap_search_action"
add_field => { "[@metadata][zeek_ldap_search_action]" => "search" } }
}
mutate { id => "mutate_merge_field_zeek_ldap_search_action"
merge => { "[zeek][action]" => "[@metadata][zeek_ldap_search_action]" } }
}

if ([zeek_modbus][func]) { mutate { id => "mutate_merge_normalize_zeek_modbus_func"
merge => { "[zeek][action]" => "[zeek_modbus][func]" } } }

if ([zeek_modbus_mask_write_register][func]) { mutate { id => "mutate_merge_normalize_zeek_modbus_mask_write_register_func"
merge => { "[zeek][action]" => "[zeek_modbus_mask_write_register][func]" } } }

if ([zeek_modbus_read_write_multiple_registers][func]) { mutate { id => "mutate_merge_normalize_zeek_modbus_read_write_multiple_registers"
merge => { "[zeek][action]" => "[zeek_modbus_read_write_multiple_registers][func]" } } }

if ([zeek_mqtt_connect][connect_status]) {
# this log entry implicitly means "connect"
mutate { id => "mutate_add_field_zeek_mqtt_connect_action"
add_field => { "[@metadata][zeek_mqtt_connect_action]" => "Connect" } }
mutate { id => "mutate_merge_zeek_mqtt_connect_action"
merge => { "[zeek][action]" => "[@metadata][zeek_mqtt_connect_action]" } }
}

if ([zeek_mqtt_publish]) {
if ([zeek_mqtt_publish][payload_dict][messageType]) {
# not sure if this is a standard or just the PCAPs I found :/
mutate { id => "mutate_merge_normalize_zeek_mqtt_publish_payload_dict_messageType"
merge => { "[zeek][action]" => "[zeek_mqtt_publish][payload_dict][messageType]" } }
} else {
mutate { id => "mutate_add_field_zeek_mqtt_publish_action"
add_field => { "[@metadata][zeek_mqtt_publish_action]" => "Publish" } }
mutate { id => "mutate_merge_zeek_mqtt_publish_action"
merge => { "[zeek][action]" => "[@metadata][zeek_mqtt_publish_action]" } }
}
}

if ([zeek_mqtt_subscribe][action]) { mutate { id => "mutate_merge_normalize_zeek_mqtt_subscribe_action"
merge => { "[zeek][action]" => "[zeek_mqtt_subscribe][action]" } } }

if ([zeek_mysql][cmd]) { mutate { id => "mutate_merge_normalize_zeek_mysql_cmd"
merge => { "[zeek][action]" => "[zeek_mysql][cmd]" } } }

if ([zeek_ntlm][success]) {
# this log entry implicitly means a login attempt
mutate { id => "mutate_add_field_zeek_ntlm_action"
add_field => { "[@metadata][zeek_ntlm_action]" => "Authenticate" } }
mutate { id => "mutate_merge_zeek_ntlm_action"
merge => { "[zeek][action]" => "[@metadata][zeek_ntlm_action]" } }
}

if ([zeek_ntp][mode_str]) { mutate { id => "mutate_merge_normalize_zeek_ntp_mode_str"
merge => { "[zeek][action]" => "[zeek_ntp][mode_str]" } } }

if ([zeek_profinet][operation_type]) { mutate { id => "mutate_merge_normalize_zeek_profinet_operation_type"
merge => { "[zeek][action]" => "[zeek_profinet][operation_type]" } } }

if ([zeek_profinet_dce_rpc][operation]) { mutate { id => "mutate_merge_normalize_zeek_profinet_dce_rpc_operation"
merge => { "[zeek][action]" => "[zeek_profinet_dce_rpc][operation]" } } }

if ([zeek_rfb][auth]) and ([zeek_rfb][authentication_method]) {
# if authentication was attempted, assign an "authenticate" action
mutate { id => "mutate_add_field_zeek_rfb_auth_action"
add_field => { "[@metadata][zeek_rfb_auth_action]" => "Authenticate" } }
mutate { id => "mutate_merge_zeek_rfb_auth_action"
merge => { "[zeek][action]" => "[@metadata][zeek_rfb_auth_action]" } }
}

if ([zeek_s7comm]) {
ruby {
# action = rosctr:mode:type:sub
id => "ruby_zeek_s7comm_generate_action"
code => "
actions = Array.new unless (actions = event.get('[zeek][action]'))
actions.append([event.get('[zeek_s7comm][rosctr]'),
event.get('[zeek_s7comm][parameters][mode]'),
event.get('[zeek_s7comm][parameters][type]'),
event.get('[zeek_s7comm][parameters][sub]')].compact.join(':'))
event.set('[zeek][action]', actions)"
}
}

if ([zeek_sip][method]) { mutate { id => "mutate_merge_normalize_zeek_sip_method"
merge => { "[zeek][action]" => "[zeek_sip][method]" } } }

if ([zeek_smtp]) {
# action depends on various smtp headers' presence
if ([zeek_smtp][last_reply]) {
if ([zeek_smtp][msg_id]) {
mutate { id => "mutate_add_field_zeek_smtp_action_deliver"
add_field => { "[@metadata][zeek_smtp_action]" => "Deliver message" } }
} else if ([zeek_smtp][mailfrom]) {
mutate { id => "mutate_add_field_zeek_smtp_action_queue"
add_field => { "[@metadata][zeek_smtp_action]" => "Queue message" } }
} else {
mutate { id => "mutate_add_field_zeek_smtp_action_connect_reply"
add_field => { "[@metadata][zeek_smtp_action]" => "Connect" } }
}
} else {
mutate { id => "mutate_add_field_zeek_smtp_action_connect_no_reply"
add_field => { "[@metadata][zeek_smtp_action]" => "Connect" } }
}
if ([@metadata][zeek_smtp_action]) {
mutate { id => "mutate_merge_zeek_smtp_action"
merge => { "[zeek][action]" => "[@metadata][zeek_smtp_action]" } }
}
}

if ([zeek_socks]) {
# socks action is "Authenticate" or "Connect" depending on whether a user/password was supplied
if ([zeek_socks][user]) or ([zeek_socks][password]) {
mutate { id => "mutate_add_field_zeek_socks_action_authenticate"
add_field => { "[@metadata][zeek_socks_action]" => "Authenticate" } }
} else {
mutate { id => "mutate_add_field_zeek_socks_action_connect"
add_field => { "[@metadata][zeek_socks_action]" => "Connect" } }
}
if ([@metadata][zeek_socks_action]) {
mutate { id => "mutate_merge_zeek_socks_action"
merge => { "[zeek][action]" => "[@metadata][zeek_socks_action]" } }
}
}

if ([zeek_smb_cmd]) {
ruby {
# action = command:sub_command
id => "ruby_zeek_smb_cmd_generate_action"
code => "
cmd = event.get('[zeek_smb_cmd][command]')
subCmd = event.get('[zeek_smb_cmd][sub_command]')
actions = Array.new unless (actions = event.get('[zeek][action]'))
actions.append((cmd =~ /^\s*transaction\d*\s*$/i) ? subCmd : [cmd, subCmd].compact.join(':'))
event.set('[zeek][action]', actions)"
}
}

if ([zeek_smb_files][action]) { mutate { id => "mutate_merge_normalize_zeek_smb_files_action"
merge => { "[zeek][action]" => "[zeek_smb_files][action]" } } }

if ([zeek_smtp][method]) { mutate { id => "mutate_merge_normalize_zeek_smtp_method"
merge => { "[zeek][action]" => "[zeek_smtp][method]" } } }

if ([zeek_snmp]) {
# action based on > 0 values for various get/set PDUs
if ([zeek_snmp][get_bulk_requests]) and ([zeek_snmp][get_bulk_requests] != "0") {
mutate { id => "mutate_add_field_zeek_snmp_get_bulk_requests_action"
add_field => { "[@metadata][snmp_get_bulk_requests_action]" => "GetBulkRequest" } }
mutate { id => "mutate_merge_zeek_snmp_action_get_bulk_requests"
merge => { "[zeek][action]" => "[@metadata][snmp_get_bulk_requests_action]" } }
}
if ([zeek_snmp][get_requests]) and ([zeek_snmp][get_requests] != "0") {
mutate { id => "mutate_add_field_zeek_snmp_get_requests_action"
add_field => { "[@metadata][snmp_get_requests_action]" => "GetRequest" } }
mutate { id => "mutate_merge_zeek_snmp_action_get_requests"
merge => { "[zeek][action]" => "[@metadata][snmp_get_requests_action]" } }
}
if ([zeek_snmp][get_responses]) and ([zeek_snmp][get_responses] != "0") {
mutate { id => "mutate_add_field_zeek_snmp_get_responses_action"
add_field => { "[@metadata][snmp_get_responses_action]" => "GetResponse" } }
mutate { id => "mutate_merge_zeek_snmp_action_get_responses"
merge => { "[zeek][action]" => "[@metadata][snmp_get_responses_action]" } }
}
if ([zeek_snmp][set_requests]) and ([zeek_snmp][set_requests] != "0") {
mutate { id => "mutate_add_field_zeek_snmp_set_requests_action"
add_field => { "[@metadata][snmp_set_requests_action]" => "SetRequest" } }
mutate { id => "mutate_merge_zeek_snmp_action_set_requests"
merge => { "[zeek][action]" => "[@metadata][snmp_set_requests_action]" } }
}
}

if ([zeek_ssh]) {
# ssh action is "Authenticate" or "Connect" based on auth_attempts
if ([zeek_ssh][auth_attempts]) {
mutate { id => "mutate_add_field_zeek_ssh_action_authenticate"
add_field => { "[@metadata][zeek_ssh_action]" => "Authenticate" } }
} else {
mutate { id => "mutate_add_field_zeek_ssh_action_connect"
add_field => { "[@metadata][zeek_ssh_action]" => "Connect" } }
}
if ([@metadata][zeek_ssh_action]) {
mutate { id => "mutate_merge_zeek_ssh_action"
merge => { "[zeek][action]" => "[@metadata][zeek_ssh_action]" } }
}
}

if ([zeek_ssl]) {
# SSL action will be "Resume", "Validate Certificate", or "Connect"
if ([zeek_ssl][resumed] == "T") {
mutate { id => "mutate_add_field_zeek_ssl_resume"
add_field => { "[@metadata][zeek_ssl_action]" => "Resume" } }
} else if ([zeek_ssl][established] != "T") and ([zeek_ssl][validation_status]) and ([zeek_ssl][validation_status] != "ok") {
mutate { id => "mutate_add_field_zeek_ssl_validate"
add_field => { "[@metadata][zeek_ssl_action]" => "Validate Certificate" } }
} else {
mutate { id => "mutate_add_field_zeek_ssl_connect"
add_field => { "[@metadata][zeek_ssl_action]" => "Connect" } }
}
mutate { id => "mutate_merge_zeek_ssl_action"
merge => { "[zeek][action]" => "[@metadata][zeek_ssl_action]" } }
}

if ([zeek_tds][command]) { mutate { id => "mutate_merge_normalize_zeek_tds_command"
merge => { "[zeek][action]" => "[zeek_tds][command]" } } }

if ([zeek_tds_rpc][procedure_name]) {
mutate { id => "mutate_add_field_zeek_tds_rpc_procedure_name_tmp"
add_field => { "[@metadata][zeek_tds_rpc_procedure_name_tmp]" => "%{[zeek_tds_rpc][procedure_name]}" } }

# remove everything after the first $
mutate { id => "mutate_gsub_field_zeek_tds_rpc_procedure_name_tmp"
gsub => [ "[@metadata][zeek_tds_rpc_procedure_name_tmp]", "\$.*", "" ] }

mutate { id => "mutate_merge_normalize_zeek_tds_rpc_procedure_name"
merge => { "[zeek][action]" => "[@metadata][zeek_tds_rpc_procedure_name_tmp]" } }
}

if ([zeek_tftp][wrq]) {
if ([zeek_tftp][wrq] == "T") {
mutate { id => "mutate_add_field_zeek_tftp_wrq"
add_field => { "[@metadata][zeek_tftp_action]" => "Write" } }
} else {
mutate { id => "mutate_add_field_zeek_tftp_connect"
add_field => { "[@metadata][zeek_tftp_action]" => "Read" } }
}
mutate { id => "mutate_merge_zeek_tftp_action"
merge => { "[zeek][action]" => "[@metadata][zeek_tftp_action]" } }
}

if ([zeek_tunnel][action]) { mutate { id => "mutate_merge_normalize_zeek_tunnel_action"
merge => { "[zeek][action]" => "[zeek_tunnel][action]" } } }

# Result ############################################################################################################
# collect all result/status/response/errors under the parent [zeek][result] array

if ([zeek_bacnet]) {
if ([zeek_bacnet][result_code]) {
mutate { id => "mutate_merge_normalize_zeek_bacnet_result_code"
merge => { "[zeek][result]" => "[zeek_bacnet][result_code]" } }
} else if ([zeek_bacnet][pdu_service]) {
mutate { id => "mutate_add_field_zeek_bacnet_success"
add_field => { "[@metadata][zeek_bacnet_result]" => "Success" } }
mutate { id => "mutate_merge_field_zeek_bacnet_success"
merge => { "[zeek][result]" => "[@metadata][zeek_bacnet_result]" } }
}
}

if ([zeek_cip][cip_status]) { mutate { id => "mutate_merge_normalize_zeek_cip_status_result"
merge => { "[zeek][result]" => "[zeek_cip][cip_status]" } } }

if ([zeek_dhcp]) {
# dhcp server_message and client_message populate result, as do ACK and NAK message types
if ([zeek_dhcp][server_message]) { mutate { id => "mutate_merge_normalize_zeek_dhcp_server_message"
merge => { "[zeek][result]" => "[zeek_dhcp][server_message]" } } }
if ([zeek_dhcp][client_message]) { mutate { id => "mutate_merge_normalize_zeek_dhcp_client_message"
merge => { "[zeek][result]" => "[zeek_dhcp][client_message]" } } }
if ("ACK" in [zeek_dhcp][msg_types]) {
mutate { id => "mutate_add_field_zeek_dhcp_ack_result"
add_field => { "[@metadata][zeek_dhcp_result]" => "Success" } }
} else if ("NAK" in [zeek_dhcp][msg_types]) {
mutate { id => "mutate_add_field_zeek_dhcp_nak_result"
add_field => { "[@metadata][zeek_dhcp_result]" => "Failure" } }
}
if ([@metadata][zeek_dhcp_result]) {
mutate { id => "mutate_merge_zeek_dhcp_result"
merge => { "[zeek][result]" => "[@metadata][zeek_dhcp_result]" } }
}
}

# dnp3: fc_reply and iin_flags
if ([zeek_dnp3][fc_reply]) { mutate { id => "mutate_merge_zeek_dnp3_fc_reply"
merge => { "[zeek][result]" => "[zeek_dnp3][fc_reply]" } } }
if ([zeek_dnp3][iin_flags]) { mutate { id => "mutate_merge_zeek_dnp3_iin_flags"
merge => { "[zeek][result]" => "[zeek_dnp3][iin_flags]" } } }

# dnp3_control.status_code
if ([zeek_dnp3_control][status_code]) { mutate { id => "mutate_merge_zeek_dnp3_control_status_code"
merge => { "[zeek][result]" => "[zeek_dnp3_control][status_code]" } } }

if ([zeek_dns]) {
# DNS result is populated by rcode_name (with NOERROR being translated to Success), and rejected
if ([zeek_dns][rcode_name]) {
if ([zeek_dns][rcode_name] == 'NOERROR') {
mutate { id => "mutate_add_field_zeek_dns_noerror"
add_field => { "[@metadata][zeek_dns_result]" => "Success" } }
mutate { id => "mutate_merge_field_zeek_dns_noerror"
merge => { "[zeek][result]" => "[@metadata][zeek_dns_result]" } }
} else {
mutate { id => "mutate_merge_normalize_zeek_dns_rcode_name"
merge => { "[zeek][result]" => "[zeek_dns][rcode_name]" } }
}
}
if ([zeek_dns][rejected] == 'T') {
mutate { id => "mutate_add_field_zeek_dns_rejected"
add_field => { "[@metadata][zeek_dns_rejected_result]" => "Rejected" } }
mutate { id => "mutate_merge_field_zeek_dns_rejected"
merge => { "[zeek][result]" => "[@metadata][zeek_dns_rejected_result]" } }
}
}

# TODO: convert zeek_ecat_foe_info.error_code and zeek_ecat_soe_info.error to strings?
# zeek_ecat_foe_info.error_code and zeek_ecat_soe_info.error
if ([zeek_ecat_foe_info][error_code]) { mutate { id => "mutate_merge_normalize_zeek_ecat_foe_info_error_code"
merge => { "[zeek][result]" => "[zeek_ecat_foe_info][error_code]" } } }
if ([zeek_ecat_soe_info][error]) { mutate { id => "mutate_merge_normalize_zeek_ecat_soe_info_error"
merge => { "[zeek][result]" => "[zeek_ecat_soe_info][error]" } } }

# zeek_enip.enip_status
if ([zeek_enip][enip_status]) { mutate { id => "mutate_merge_normalize_zeek_enip_enip_status"
merge => { "[zeek][result]" => "[zeek_enip][enip_status]" } } }

if ([zeek_ftp][reply_code]) {
# normalized version of reply code (reply_msg is too unpredictable)
translate {
id => "translate_zeek_ftp_reply_code"
field => "[zeek_ftp][reply_code]"
destination => "[@metadata][zeek_ftp_mapped_result]"
dictionary_path => "/etc/ftp_result_codes.yaml"
}
if ([@metadata][zeek_ftp_mapped_result]) {
mutate { id => "mutate_merge_zeek_ftp_mapped_result"
merge => { "[zeek][result]" => "[@metadata][zeek_ftp_mapped_result]" } }
} else if ([zeek_ftp][reply_msg]) {
mutate { id => "mutate_merge_zeek_ftp_reply_msg_result"
merge => { "[zeek][result]" => "[zeek_ftp][reply_msg]" } }
} else {
mutate { id => "mutate_merge_zeek_ftp_reply_code_result"
merge => { "[zeek][result]" => "[zeek_ftp][reply_code]" } }
}
}

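# The *_result_codes.yaml files referenced by this and the following translate
# filters are plain YAML dictionaries mapping a protocol's numeric code to a
# normalized label; a sketch of the expected shape (these entries are
# illustrative assumptions, not copies of the shipped file):
#   "230": "Success"
#   "530": "Failure"
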
if ([zeek_http][status_code]) {
# normalized version of http reply code (status_msg is too unpredictable)
translate {
id => "translate_zeek_http_reply_code"
field => "[zeek_http][status_code]"
destination => "[@metadata][zeek_http_mapped_result]"
dictionary_path => "/etc/http_result_codes.yaml"
}
if ([@metadata][zeek_http_mapped_result]) {
mutate { id => "mutate_merge_zeek_http_mapped_result"
merge => { "[zeek][result]" => "[@metadata][zeek_http_mapped_result]" } }
} else if ([zeek_http][status_msg]) {
mutate { id => "mutate_merge_zeek_http_status_msg_result"
merge => { "[zeek][result]" => "[zeek_http][status_msg]" } }
} else {
mutate { id => "mutate_merge_zeek_http_status_code_result"
merge => { "[zeek][result]" => "[zeek_http][status_code]" } }
}
}

if ([zeek_kerberos]) {
# result populated from success and error_msg
if ([zeek_kerberos][success] == 'T') {
mutate { id => "mutate_add_field_zeek_zeek_kerberos_success"
add_field => { "[@metadata][zeek_kerberos_result]" => "Success" } }
} else if ([zeek_kerberos][error_msg]) {
mutate { id => "mutate_add_field_zeek_zeek_kerberos_error_msg"
add_field => { "[@metadata][zeek_kerberos_result]" => "%{[zeek_kerberos][error_msg]}" } }
} else {
mutate { id => "mutate_add_field_zeek_zeek_kerberos_failure"
add_field => { "[@metadata][zeek_kerberos_result]" => "Failure" } }
}
mutate { id => "mutate_merge_zeek_kerberos_result"
merge => { "[zeek][result]" => "[@metadata][zeek_kerberos_result]" } }
}

# (zeek_ldap|zeek_ldap_search).(result_code)
if ([zeek_ldap][result_code]) { mutate { id => "mutate_merge_normalize_zeek_ldap_result_code"
merge => { "[zeek][result]" => "[zeek_ldap][result_code]" } } }
if ([zeek_ldap_search][result_code]) { mutate { id => "mutate_merge_normalize_zeek_ldap_search_result_code"
merge => { "[zeek][result]" => "[zeek_ldap_search][result_code]" } } }

if ([zeek_modbus]) {
# result comes from exception, but if exception is missing and we have a func, then assume success
if ([zeek_modbus][exception]) {
mutate { id => "mutate_merge_normalize_zeek_modbus_exception"
merge => { "[zeek][result]" => "[zeek_modbus][exception]" } }
} else if ([zeek_modbus][func]) {
mutate { id => "mutate_add_field_zeek_modbus_success"
add_field => { "[@metadata][zeek_modbus_result]" => "Success" } }
mutate { id => "mutate_merge_field_zeek_modbus_success"
merge => { "[zeek][result]" => "[@metadata][zeek_modbus_result]" } }
}
}

# result for zeek_mqtt_connect: connect_status.'Connection Accepted' -> 'Success', else connect_status
if ([zeek_mqtt_connect][connect_status] == 'Connection Accepted') {
mutate { id => "mutate_add_field_zeek_mqtt_connect_success"
add_field => { "[@metadata][zeek_mqtt_connect_success]" => "Success" } }
mutate { id => "mutate_merge_field_zeek_mqtt_connect_success"
merge => { "[zeek][result]" => "[@metadata][zeek_mqtt_connect_success]" } }
} else if ([zeek_mqtt_connect][connect_status]) {
mutate { id => "mutate_merge_zeek_mqtt_connect_connect_status"
merge => { "[zeek][result]" => "[zeek_mqtt_connect][connect_status]" } }
}

# result for zeek_mqtt_publish: status.'ok' -> 'Success', else status
if ([zeek_mqtt_publish][status] == 'ok') {
mutate { id => "mutate_add_field_zeek_mqtt_publish_success"
add_field => { "[@metadata][zeek_mqtt_publish_success]" => "Success" } }
mutate { id => "mutate_merge_field_zeek_mqtt_publish_success"
merge => { "[zeek][result]" => "[@metadata][zeek_mqtt_publish_success]" } }
} else if ([zeek_mqtt_publish][status]) {
mutate { id => "mutate_merge_zeek_mqtt_publish_publish_status"
merge => { "[zeek][result]" => "[zeek_mqtt_publish][status]" } }
}

# zeek_mqtt_subscribe.ack.'T' -> 'Acknowledged'
if ([zeek_mqtt_subscribe][ack] == 'T') {
mutate { id => "mutate_add_field_zeek_mqtt_subscribe_ack"
add_field => { "[@metadata][zeek_mqtt_subscribe_acknowledged]" => "Acknowledged" } }
mutate { id => "mutate_merge_field_zeek_mqtt_subscribe_ack"
merge => { "[zeek][result]" => "[@metadata][zeek_mqtt_subscribe_acknowledged]" } }
}

if ([zeek_mysql]) {
# mysql result comes from success and response
if ([zeek_mysql][success] == "T") {
mutate { id => "mutate_add_field_zeek_mysql_success"
add_field => { "[@metadata][zeek_mysql_result]" => "Success" } }
} else if ([zeek_mysql][response] =~ /^Access denied/) {
mutate { id => "mutate_add_field_zeek_mysql_access"
add_field => { "[@metadata][zeek_mysql_result]" => "Access denied" } }
} else {
mutate { id => "mutate_add_field_zeek_mysql_failure"
add_field => { "[@metadata][zeek_mysql_result]" => "Failure" } }
}
mutate { id => "mutate_merge_zeek_mysql_result"
merge => { "[zeek][result]" => "[@metadata][zeek_mysql_result]" } }
}

if ([zeek_ntlm]) {
# ntlm result comes from .success
if ([zeek_ntlm][success] == "T") {
mutate { id => "mutate_add_field_zeek_ntlm_success"
add_field => { "[@metadata][zeek_ntlm_result]" => "Success" } }
} else {
mutate { id => "mutate_add_field_zeek_ntlm_failure"
add_field => { "[@metadata][zeek_ntlm_result]" => "Failure" } }
}
mutate { id => "mutate_merge_zeek_ntlm_result"
merge => { "[zeek][result]" => "[@metadata][zeek_ntlm_result]" } }
}

if ([zeek_radius][result]) {
if ([zeek_radius][result] =~ /^(?i)succ/) {
mutate { id => "mutate_add_field_zeek_radius_success"
add_field => { "[@metadata][zeek_radius_result]" => "Success" } }
} else if ([zeek_radius][result] =~ /^(?i)fail/) {
mutate { id => "mutate_add_field_zeek_radius_failure"
add_field => { "[@metadata][zeek_radius_result]" => "Failure" } }
} else {
mutate { id => "mutate_add_field_zeek_radius_result_fallback"
add_field => { "[@metadata][zeek_radius_result]" => "%{[zeek_radius][result]}" } }
}
mutate { id => "mutate_merge_zeek_radius_result"
merge => { "[zeek][result]" => "[@metadata][zeek_radius_result]" } }
# if authentication was attempted, also assign an "authenticate" action
mutate { id => "mutate_add_field_zeek_radius_auth_action"
add_field => { "[@metadata][zeek_radius_auth_action]" => "Authenticate" } }
mutate { id => "mutate_merge_zeek_radius_auth_action"
merge => { "[zeek][action]" => "[@metadata][zeek_radius_auth_action]" } }
}

if ([zeek_rdp][result]) { mutate { id => "mutate_merge_normalize_zeek_rdp_result"
merge => { "[zeek][result]" => "[zeek_rdp][result]" } } }

if ([zeek_s7comm][parameters][code]) {
# reference: https://github.com/wireshark/wireshark/blob/master/epan/dissectors/packet-s7comm.c
translate {
id => "translate_zeek_s7comm_parameters_code"
field => "[zeek_s7comm][parameters][code]"
destination => "[@metadata][zeek_s7comm_mapped_result]"
dictionary_path => "/etc/s7comm_result_codes.yaml"
fallback => "%{[zeek_s7comm][parameters][code]}"
}
if ([@metadata][zeek_s7comm_mapped_result]) {
mutate { id => "mutate_merge_zeek_s7comm_mapped_result"
merge => { "[zeek][result]" => "[@metadata][zeek_s7comm_mapped_result]" } }
}
}

if ([zeek_sip][status_code]) {
# normalized version of sip reply code (status_msg may be unpredictable)
translate {
id => "translate_zeek_sip_reply_code"
field => "[zeek_sip][status_code]"
destination => "[@metadata][zeek_sip_mapped_result]"
dictionary_path => "/etc/sip_result_codes.yaml"
}
if ([@metadata][zeek_sip_mapped_result]) {
mutate { id => "mutate_merge_zeek_sip_mapped_result"
merge => { "[zeek][result]" => "[@metadata][zeek_sip_mapped_result]" } }
} else if ([zeek_sip][status_msg]) {
mutate { id => "mutate_merge_zeek_sip_status_msg_result"
merge => { "[zeek][result]" => "[zeek_sip][status_msg]" } }
} else {
mutate { id => "mutate_merge_zeek_sip_status_code_result"
merge => { "[zeek][result]" => "[zeek_sip][status_code]" } }
}
}

if ([zeek_smb_cmd][status]) {
# zeek_smb_cmd.status (SUCCESS, NO_SUCH_FILE, ACCESS_DENIED, OBJECT_NAME_COLLISION, etc.)
translate {
id => "translate_zeek_smb_cmd_status"
field => "[zeek_smb_cmd][status]"
destination => "[@metadata][zeek_smb_cmd_mapped_result]"
dictionary => {
"SUCCESS" => "Success"
# TODO... normalize other codes? or maybe just case-normalize and remove underscores/dashes?
# e.g., "ACCESS_DENIED".split(/[_-]/).collect(&:capitalize).join(' ')
}
fallback => "%{[zeek_smb_cmd][status]}"
}
if ([@metadata][zeek_smb_cmd_mapped_result]) {
mutate { id => "mutate_merge_zeek_smb_cmd_mapped_result"
merge => { "[zeek][result]" => "[@metadata][zeek_smb_cmd_mapped_result]" } }
}
}

if ([zeek_smtp]) {
if ([zeek_smtp][last_reply_code]) {
# normalized version of smtp reply code (last_reply may be unpredictable)
translate {
id => "translate_zeek_smtp_last_reply_code"
field => "[zeek_smtp][last_reply_code]"
destination => "[@metadata][zeek_smtp_mapped_result]"
dictionary_path => "/etc/smtp_result_codes.yaml"
}
}
if ([@metadata][zeek_smtp_mapped_result]) {
mutate { id => "mutate_merge_zeek_smtp_mapped_result"
merge => { "[zeek][result]" => "[@metadata][zeek_smtp_mapped_result]" } }
} else if ([zeek_smtp][last_reply]) {
mutate { id => "mutate_merge_zeek_smtp_last_reply_result"
merge => { "[zeek][result]" => "[zeek_smtp][last_reply]" } }
}
}

if ([zeek_socks][server_status]) {
translate {
id => "translate_zeek_socks_server_status"
field => "[zeek_socks][server_status]"
destination => "[@metadata][zeek_socks_mapped_result]"
dictionary => {
"succeeded" => "Success"
# TODO... normalize other codes (figure out what they are)
}
}
if ([@metadata][zeek_socks_mapped_result]) {
mutate { id => "mutate_merge_zeek_socks_mapped_result"
merge => { "[zeek][result]" => "[@metadata][zeek_socks_mapped_result]" } }
} else if ([zeek_socks][server_status]) {
mutate { id => "mutate_merge_zeek_socks_server_status_result"
merge => { "[zeek][result]" => "[zeek_socks][server_status]" } }
}
}

if ([zeek_ssh][auth_success]) {
|
||||
translate {
|
||||
id => "translate_zeek_ssh_auth_success"
|
||||
field => "[zeek_ssh][auth_success]"
|
||||
destination => "[@metadata][zeek_ssh_mapped_result]"
|
||||
dictionary => {
|
||||
"T" => "Success"
|
||||
"F" => "Failure"
|
||||
}
|
||||
}
|
||||
if ([@metadata][zeek_ssh_mapped_result]) {
|
||||
mutate { id => "mutate_merge_zeek_ssh_mapped_result"
|
||||
merge => { "[zeek][result]" => "[@metadata][zeek_ssh_mapped_result]" } }
|
||||
}
|
||||
}
|
||||
|
||||
if ([zeek_ssl]) {
|
||||
if ([zeek_ssl][established] == "T") {
|
||||
mutate { id => "mutate_add_field_zeek_ssl_result_success"
|
||||
add_field => { "[@metadata][zeek_ssl_mapped_success_result]" => "Success" } }
|
||||
} else if (![zeek_ssl][last_alert]) {
|
||||
mutate { id => "mutate_add_field_zeek_ssl_result_failure"
|
||||
add_field => { "[@metadata][zeek_ssl_mapped_success_result]" => "Failure" } }
|
||||
}
|
||||
if ([@metadata][zeek_ssl_mapped_success_result]) {
|
||||
mutate { id => "mutate_merge_zeek_ssl_mapped_success_result"
|
||||
merge => { "[zeek][result]" => "[@metadata][zeek_ssl_mapped_success_result]" } }
|
||||
}
|
||||
if ([zeek_ssl][last_alert]) {
|
||||
mutate { id => "mutate_merge_field_zeek_ssl_result_last_alert"
|
||||
merge => { "[zeek][result]" => "[zeek_ssl][last_alert]" } }
|
||||
}
|
||||
if ([zeek_ssl][validation_status]) and ([zeek_ssl][validation_status] != 'ok') {
|
||||
mutate { id => "mutate_merge_field_zeek_ssl_result_validation_status"
|
||||
merge => { "[zeek][result]" => "[zeek_ssl][validation_status]" } }
|
||||
}
|
||||
}
|
||||
|
||||
if ([zeek_tftp]) {
|
||||
|
||||
if (![zeek_tftp][error_code]) and (![zeek_tftp][error_msg]) {
|
||||
# no error, set as "success"
|
||||
mutate { id => "mutate_add_field_zeek_tftp_result_success"
|
||||
add_field => { "[@metadata][zeek_tftp_result_success]" => "Success" } }
|
||||
mutate { id => "mutate_merge_zeek_tftp_result_success"
|
||||
merge => { "[zeek][result]" => "[@metadata][zeek_tftp_result_success]" } }
|
||||
|
||||
} else {
|
||||
# normalized version of reply code
|
||||
translate {
|
||||
id => "translate_zeek_tftp_error_code"
|
||||
field => "[zeek_tftp][error_code]"
|
||||
destination => "[@metadata][zeek_tftp_mapped_result]"
|
||||
dictionary_path => "/etc/tftp_result_codes.yaml"
|
||||
}
|
||||
if ([@metadata][zeek_tftp_mapped_result]) {
|
||||
mutate { id => "mutate_merge_zeek_tftp_mapped_result"
|
||||
merge => { "[zeek][result]" => "[@metadata][zeek_tftp_mapped_result]" } }
|
||||
} else if ([zeek_tftp][error_msg]) {
|
||||
mutate { id => "mutate_merge_zeek_tftp_error_msg_result"
|
||||
merge => { "[zeek][result]" => "[zeek_tftp][error_msg]" } }
|
||||
} else {
|
||||
mutate { id => "mutate_merge_zeek_tftp_error_code_result"
|
||||
merge => { "[zeek][result]" => "[zeek_tftp][error_code]" } }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#####################################################################################################################
|
||||
# remove any duplicates from action and result
|
||||
if ([zeek][action]) {
|
||||
ruby {
|
||||
id => "ruby_zeek_action_uniq"
|
||||
code => "event.set('[zeek][action]', event.get('[zeek][action]').uniq)"
|
||||
}
|
||||
}
|
||||
|
||||
if ([zeek][result]) {
|
||||
ruby {
|
||||
id => "ruby_zeek_result_uniq"
|
||||
code => "event.set('[zeek][result]', event.get('[zeek][result]').uniq)"
|
||||
}
|
||||
}
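
  # NOTE: merging per-protocol results can leave repeated values, so the uniq calls above
  # collapse them, e.g. [zeek][result] ["Success", "Success", "Failure"] => ["Success", "Failure"]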

  # FUIDs #############################################################################################################
  # collect all other FUIDs under parent [zeek][fuid] array (some were already done at the root level in
  # the "rename" in 11_zeek_logs.conf)

  if ([zeek_files][parent_fuid]) { mutate { id => "mutate_merge_normalize_zeek_files_parent_fuid"
                                            merge => { "[zeek][fuid]" => "[zeek_files][parent_fuid]" } } }

  if ([zeek_http][orig_fuids]) { mutate { id => "mutate_merge_normalize_zeek_http_orig_fuids"
                                          merge => { "[zeek][fuid]" => "[zeek_http][orig_fuids]" } } }

  if ([zeek_http][resp_fuids]) { mutate { id => "mutate_merge_normalize_zeek_http_resp_fuids"
                                          merge => { "[zeek][fuid]" => "[zeek_http][resp_fuids]" } } }

  if ([zeek_kerberos][client_cert_fuid]) { mutate { id => "mutate_merge_normalize_zeek_kerberos_client_cert_fuid"
                                                    merge => { "[zeek][fuid]" => "[zeek_kerberos][client_cert_fuid]" } } }

  if ([zeek_kerberos][server_cert_fuid]) { mutate { id => "mutate_merge_normalize_zeek_kerberos_server_cert_fuid"
                                                    merge => { "[zeek][fuid]" => "[zeek_kerberos][server_cert_fuid]" } } }

  if ([zeek_ssl][cert_chain_fuids]) { mutate { id => "mutate_merge_normalize_zeek_ssl_cert_chain_fuids"
                                               merge => { "[zeek][fuid]" => "[zeek_ssl][cert_chain_fuids]" } } }

  if ([zeek_ssl][client_cert_chain_fuids]) { mutate { id => "mutate_merge_normalize_zeek_ssl_client_cert_chain_fuids"
                                                      merge => { "[zeek][fuid]" => "[zeek_ssl][client_cert_chain_fuids]" } } }

  if ([zeek][fuid]) {
    ruby {
      id => "ruby_zeek_fuid_uniq"
      code => "event.set('[zeek][fuid]', event.get('[zeek][fuid]').uniq)"
    }
  }

  # File/MIME types ###################################################################################################
  # collect all file/MIME types under the parent [zeek][filetype] array

  if ([zeek_files][mime_type]) { mutate { id => "mutate_merge_normalize_zeek_files_mime_type"
                                          merge => { "[zeek][filetype]" => "[zeek_files][mime_type]" } } }

  if ([zeek_ftp][mime_type]) { mutate { id => "mutate_merge_normalize_zeek_ftp_mime_type"
                                        merge => { "[zeek][filetype]" => "[zeek_ftp][mime_type]" } } }

  if ([zeek_http][orig_mime_types]) { mutate { id => "mutate_merge_normalize_zeek_http_orig_mime_types"
                                               merge => { "[zeek][filetype]" => "[zeek_http][orig_mime_types]" } } }

  if ([zeek_http][resp_mime_types]) { mutate { id => "mutate_merge_normalize_zeek_http_resp_mime_types"
                                               merge => { "[zeek][filetype]" => "[zeek_http][resp_mime_types]" } } }

  if ([zeek_irc][dcc_mime_type]) { mutate { id => "mutate_merge_normalize_zeek_irc_dcc_mime_type"
                                            merge => { "[zeek][filetype]" => "[zeek_irc][dcc_mime_type]" } } }

  if ([zeek_intel][file_mime_type]) { mutate { id => "mutate_merge_normalize_zeek_intel_file_mime_type"
                                               merge => { "[zeek][filetype]" => "[zeek_intel][file_mime_type]" } } }

  if ([zeek_notice][file_mime_type]) { mutate { id => "mutate_merge_normalize_zeek_notice_file_mime_type"
                                                merge => { "[zeek][filetype]" => "[zeek_notice][file_mime_type]" } } }

  if ([zeek_sip][content_type]) { mutate { id => "mutate_merge_normalize_zeek_sip_content_type"
                                           merge => { "[zeek][filetype]" => "[zeek_sip][content_type]" } } }

  if ([zeek][filetype]) {
    ruby {
      id => "ruby_zeek_filetype_uniq"
      code => "event.set('[zeek][filetype]', event.get('[zeek][filetype]').uniq)"
    }
  }

  # Filenames #########################################################################################################
  # collect all filenames under the parent [zeek][filename] array

  if ([zeek_ecat_foe_info][filename]) { mutate { id => "mutate_merge_normalize_zeek_ecat_foe_info_filename"
                                                 merge => { "[zeek][filename]" => "[zeek_ecat_foe_info][filename]" } } }

  if ([zeek_files][filename]) { mutate { id => "mutate_merge_normalize_zeek_files_filename"
                                         merge => { "[zeek][filename]" => "[zeek_files][filename]" } } }

  if ([zeek_files][extracted]) { mutate { id => "mutate_merge_normalize_zeek_files_extracted"
                                          merge => { "[zeek][filename]" => "[zeek_files][extracted]" } } }

  if ([zeek_http][orig_filenames]) { mutate { id => "mutate_merge_normalize_zeek_http_orig_filenames"
                                              merge => { "[zeek][filename]" => "[zeek_http][orig_filenames]" } } }

  if ([zeek_http][resp_filenames]) { mutate { id => "mutate_merge_normalize_zeek_http_resp_filenames"
                                              merge => { "[zeek][filename]" => "[zeek_http][resp_filenames]" } } }

  if ([zeek_irc][dcc_file_name]) { mutate { id => "mutate_merge_normalize_zeek_irc_dcc_file_name"
                                            merge => { "[zeek][filename]" => "[zeek_irc][dcc_file_name]" } } }

  if ([zeek_smb_files][name]) { mutate { id => "mutate_merge_normalize_zeek_smb_files_name"
                                         merge => { "[zeek][filename]" => "[zeek_smb_files][name]" } } }

  if ([zeek_smb_files][prev_name]) { mutate { id => "mutate_merge_normalize_zeek_smb_files_prev_name"
                                              merge => { "[zeek][filename]" => "[zeek_smb_files][prev_name]" } } }

  if ([zeek_tftp][fname]) { mutate { id => "mutate_merge_normalize_zeek_tftp_fname"
                                     merge => { "[zeek][filename]" => "[zeek_tftp][fname]" } } }

  if ([zeek][filename]) {
    ruby {
      id => "ruby_zeek_filename_uniq"
      code => "event.set('[zeek][filename]', event.get('[zeek][filename]').uniq)"
    }
  }

}
@@ -0,0 +1,299 @@
filter {

  # set data types for fields that belong to various zeek logs

  # todo
  # "[zeek_ecat_dev_info][fmmucnt]" => "integer"
  # "[zeek_ecat_dev_info][smcount]" => "integer"

  mutate {
    id => "mutate_convert_zeek_bulk"
    convert => {
      "[zeek_bacnet][invoke_id]" => "integer"
      "[zeek_bacnet_discovery][instance_number]" => "integer"
      "[zeek_bacnet_discovery][range_low]" => "integer"
      "[zeek_bacnet_discovery][range_high]" => "integer"
      "[zeek_bacnet_property][instance_number]" => "integer"
      "[zeek_bacnet_property][array_index]" => "integer"
      "[zeek_bsap_ip_header][type_name]" => "integer"
      "[zeek_bsap_ip_rdb][data_len]" => "integer"
      "[zeek_bsap_ip_rdb][header_size]" => "integer"
      "[zeek_bsap_ip_rdb][mes_seq]" => "integer"
      "[zeek_bsap_ip_rdb][node_status]" => "integer"
      "[zeek_bsap_ip_rdb][res_seq]" => "integer"
      "[zeek_bsap_ip_rdb][sequence]" => "integer"
      "[zeek_bsap_serial_header][ctl]" => "integer"
      "[zeek_bsap_serial_header][dadd]" => "integer"
      "[zeek_bsap_serial_header][nsb]" => "integer"
      "[zeek_bsap_serial_header][sadd]" => "integer"
      "[zeek_bsap_serial_header][seq]" => "integer"
      "[zeek_bsap_serial_rdb_ext][nsb]" => "integer"
      "[zeek_bsap_serial_rdb_ext][seq]" => "integer"
      "[zeek_cip][cip_sequence_count]" => "integer"
      "[zeek_cip_identity][device_type_id]" => "integer"
      "[zeek_cip_identity][encapsulation_version]" => "integer"
      "[zeek_cip_identity][product_code]" => "integer"
      "[zeek_cip_identity][socket_port]" => "integer"
      "[zeek_cip_identity][vendor_id]" => "integer"
      "[zeek_cip_io][data_length]" => "integer"
      "[zeek_cip_io][sequence_number]" => "integer"
      "[zeek_conn][duration]" => "float"
      "[zeek_dce_rpc][rtt]" => "float"
      "[zeek_dhcp][duration]" => "float"
      "[zeek_dnp3_control][index_number]" => "integer"
      "[zeek_dnp3_control][execute_count]" => "integer"
      "[zeek_dnp3_control][on_time]" => "integer"
      "[zeek_dnp3_control][off_time]" => "integer"
      "[zeek_dnp3_objects][object_count]" => "integer"
      "[zeek_dnp3_objects][range_high]" => "integer"
      "[zeek_dnp3_objects][range_low]" => "integer"
      "[zeek_dns][rtt]" => "float"
      "[zeek_ecat_log_address][length]" => "integer"
      "[zeek_enip][length]" => "integer"
      "[zeek_ipsec][maj_ver]" => "integer"
      "[zeek_ipsec][min_ver]" => "integer"
      "[zeek_ipsec][exchange_type]" => "integer"
      "[zeek_ipsec][ke_dh_groups]" => "integer"
      "[zeek_ipsec][proposals]" => "integer"
      "[zeek_ipsec][length]" => "integer"
      "[zeek_ldap][version]" => "integer"
      "[zeek_ldap_search][result_count]" => "integer"
      "[zeek_modbus_detailed][unit_id]" => "integer"
      "[zeek_modbus_detailed][address]" => "integer"
      "[zeek_modbus_detailed][quantity]" => "integer"
      "[zeek_modbus_mask_write_register][unit_id]" => "integer"
      "[zeek_modbus_mask_write_register][address]" => "integer"
      "[zeek_modbus_mask_write_register][and_mask]" => "integer"
      "[zeek_modbus_mask_write_register][or_mask]" => "integer"
      "[zeek_modbus_read_write_multiple_registers][unit_id]" => "integer"
      "[zeek_modbus_read_write_multiple_registers][write_start_address]" => "integer"
      "[zeek_modbus_read_write_multiple_registers][read_start_address]" => "integer"
      "[zeek_modbus_read_write_multiple_registers][read_quantity]" => "integer"
      "[zeek_modbus_register][delta]" => "float"
      "[zeek_modbus_register][new_val]" => "integer"
      "[zeek_modbus_register][old_val]" => "integer"
      "[zeek_modbus_register][register]" => "integer"
      "[zeek_mqtt_publish][payload_len]" => "integer"
      "[zeek_mqtt_subscribe][granted_qos_level]" => "integer"
      "[zeek_mqtt_subscribe][qos_levels]" => "integer"
      "[zeek_ntp][num_exts]" => "integer"
      "[zeek_ntp][poll]" => "float"
      "[zeek_ntp][precision]" => "float"
      "[zeek_ntp][root_delay]" => "float"
      "[zeek_ntp][root_disp]" => "float"
      "[zeek_ntp][version]" => "integer"
      "[zeek_s7comm][item_count]" => "integer"
      "[zeek_signatures][host_count]" => "integer"
      "[zeek_signatures][signature_count]" => "integer"
      "[zeek_smb_cmd][rtt]" => "float"
      "[zeek_smb_files][data_len_req]" => "integer"
      "[zeek_smb_files][data_len_rsp]" => "integer"
      "[zeek_smb_files][data_offset_req]" => "integer"
      "[zeek_tftp][size]" => "integer"
      "[zeek_tftp][block_sent]" => "integer"
      "[zeek_tftp][block_acked]" => "integer"
      "[zeek_tftp][error_code]" => "integer"
      "[zeek_wireguard][sender_index]" => "integer"
      "[zeek_wireguard][receiver_index]" => "integer"
    }
  }
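
  # NOTE: Zeek log values arrive as strings, so the bulk convert above re-types them to make
  # range queries and aggregations work downstream, e.g. "[zeek_conn][duration]" => "float"
  # turns "1.5" into 1.5, and "[zeek_tftp][size]" => "integer" turns "512" into 512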

  # convert all zeek "time" types (minus zeek.ts, which was done earlier)
  # https://docs.zeek.org/en/current/script-reference/types.html#type-time

  if ([zeek_kerberos][from]) {
    if ([zeek_kerberos][from] == "0.000000") {
      mutate { id => "mutate_remove_field_zeek_kerberos_from_zero"
               remove_field => [ "[zeek_kerberos][from]" ] }
    } else {
      date {
        id => "date_zeek_kerberos_from"
        match => [ "[zeek_kerberos][from]", "UNIX" ]
        target => "[zeek_kerberos][from]"
      }
    }
  }
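
  # NOTE: Zeek renders unset time fields as "0.000000", which the date filter would otherwise
  # parse as 1970-01-01T00:00:00Z, so those are removed instead; a real value such as
  # "1609459200.000000" becomes 2021-01-01T00:00:00Z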

  if ([zeek_kerberos][till]) {
    if ([zeek_kerberos][till] == "0.000000") {
      mutate { id => "mutate_remove_field_zeek_kerberos_till_zero"
               remove_field => [ "[zeek_kerberos][till]" ] }
    } else {
      date {
        id => "date_zeek_kerberos_till"
        match => [ "[zeek_kerberos][till]", "UNIX" ]
        target => "[zeek_kerberos][till]"
      }
    }
  }

  if ([zeek_ntp][org_time]) {
    if ([zeek_ntp][org_time] == "0.000000") {
      mutate { id => "mutate_remove_field_zeek_ntp_org_time_zero"
               remove_field => [ "[zeek_ntp][org_time]" ] }
    } else {
      date {
        id => "date_zeek_ntp_org_time"
        match => [ "[zeek_ntp][org_time]", "UNIX" ]
        target => "[zeek_ntp][org_time]"
      }
    }
  }

  if ([zeek_ntp][rec_time]) {
    if ([zeek_ntp][rec_time] == "0.000000") {
      mutate { id => "mutate_remove_field_zeek_ntp_rec_time_zero"
               remove_field => [ "[zeek_ntp][rec_time]" ] }
    } else {
      date {
        id => "date_zeek_ntp_rec_time"
        match => [ "[zeek_ntp][rec_time]", "UNIX" ]
        target => "[zeek_ntp][rec_time]"
      }
    }
  }

  if ([zeek_ntp][ref_time]) {
    if ([zeek_ntp][ref_time] == "0.000000") {
      mutate { id => "mutate_remove_field_zeek_ntp_ref_time_zero"
               remove_field => [ "[zeek_ntp][ref_time]" ] }
    } else {
      date {
        id => "date_zeek_ntp_ref_time"
        match => [ "[zeek_ntp][ref_time]", "UNIX" ]
        target => "[zeek_ntp][ref_time]"
      }
    }
  }

  if ([zeek_ntp][xmt_time]) {
    if ([zeek_ntp][xmt_time] == "0.000000") {
      mutate { id => "mutate_remove_field_zeek_ntp_xmt_time_zero"
               remove_field => [ "[zeek_ntp][xmt_time]" ] }
    } else {
      date {
        id => "date_zeek_ntp_xmt_time"
        match => [ "[zeek_ntp][xmt_time]", "UNIX" ]
        target => "[zeek_ntp][xmt_time]"
      }
    }
  }

  if ([zeek_pe][compile_ts]) {
    if ([zeek_pe][compile_ts] == "0.000000") {
      mutate { id => "mutate_remove_field_zeek_pe_compile_ts_zero"
               remove_field => [ "[zeek_pe][compile_ts]" ] }
    } else {
      date {
        id => "date_zeek_pe_compile_ts"
        match => [ "[zeek_pe][compile_ts]", "UNIX" ]
        target => "[zeek_pe][compile_ts]"
      }
    }
  }

  if ([zeek_smb_files][times_accessed]) {
    if ([zeek_smb_files][times_accessed] == "0.000000") {
      mutate { id => "mutate_remove_field_zeek_smb_files_times_accessed_zero"
               remove_field => [ "[zeek_smb_files][times_accessed]" ] }
    } else {
      date {
        id => "date_zeek_smb_files_times_accessed"
        match => [ "[zeek_smb_files][times_accessed]", "UNIX" ]
        target => "[zeek_smb_files][times_accessed]"
      }
    }
  }

  if ([zeek_smb_files][times_changed]) {
    if ([zeek_smb_files][times_changed] == "0.000000") {
      mutate { id => "mutate_remove_field_zeek_smb_files_times_changed_zero"
               remove_field => [ "[zeek_smb_files][times_changed]" ] }
    } else {
      date {
        id => "date_zeek_smb_files_times_changed"
        match => [ "[zeek_smb_files][times_changed]", "UNIX" ]
        target => "[zeek_smb_files][times_changed]"
      }
    }
  }

  if ([zeek_smb_files][times_created]) {
    if ([zeek_smb_files][times_created] == "0.000000") {
      mutate { id => "mutate_remove_field_zeek_smb_files_times_created_zero"
               remove_field => [ "[zeek_smb_files][times_created]" ] }
    } else {
      date {
        id => "date_zeek_smb_files_times_created"
        match => [ "[zeek_smb_files][times_created]", "UNIX" ]
        target => "[zeek_smb_files][times_created]"
      }
    }
  }

  if ([zeek_smb_files][times_modified]) {
    if ([zeek_smb_files][times_modified] == "0.000000") {
      mutate { id => "mutate_remove_field_zeek_smb_files_times_modified_zero"
               remove_field => [ "[zeek_smb_files][times_modified]" ] }
    } else {
      date {
        id => "date_zeek_smb_files_times_modified"
        match => [ "[zeek_smb_files][times_modified]", "UNIX" ]
        target => "[zeek_smb_files][times_modified]"
      }
    }
  }

  if ([zeek_smb_files][ts]) {
    if ([zeek_smb_files][ts] == "0.000000") {
      mutate { id => "mutate_remove_field_zeek_smb_files_ts_zero"
               remove_field => [ "[zeek_smb_files][ts]" ] }
    } else {
      date {
        id => "date_zeek_smb_files_ts"
        match => [ "[zeek_smb_files][ts]", "UNIX" ]
        target => "[zeek_smb_files][ts]"
      }
    }
  }

  if ([zeek_snmp][up_since]) {
    if ([zeek_snmp][up_since] == "0.000000") {
      mutate { id => "mutate_remove_field_zeek_snmp_up_since_zero"
               remove_field => [ "[zeek_snmp][up_since]" ] }
    } else {
      date {
        id => "date_zeek_snmp_up_since"
        match => [ "[zeek_snmp][up_since]", "UNIX" ]
        target => "[zeek_snmp][up_since]"
      }
    }
  }

  if ([zeek_x509][certificate_not_valid_after]) {
    if ([zeek_x509][certificate_not_valid_after] == "0.000000") {
      mutate { id => "mutate_remove_field_zeek_x509_certificate_not_valid_after_zero"
               remove_field => [ "[zeek_x509][certificate_not_valid_after]" ] }
    } else {
      date {
        id => "date_zeek_x509_certificate_not_valid_after"
        match => [ "[zeek_x509][certificate_not_valid_after]", "UNIX" ]
        target => "[zeek_x509][certificate_not_valid_after]"
      }
    }
  }

  if ([zeek_x509][certificate_not_valid_before]) {
    if ([zeek_x509][certificate_not_valid_before] == "0.000000") {
      mutate { id => "mutate_remove_field_zeek_x509_certificate_not_valid_before_zero"
               remove_field => [ "[zeek_x509][certificate_not_valid_before]" ] }
    } else {
      date {
        id => "date_zeek_x509_certificate_not_valid_before"
        match => [ "[zeek_x509][certificate_not_valid_before]", "UNIX" ]
        target => "[zeek_x509][certificate_not_valid_before]"
      }
    }
  }

}
@@ -0,0 +1,773 @@
filter {

  # Map zeek fields to ECS where possible (see https://github.com/idaholab/Malcolm/issues/16)
  # For now I will add fields rather than rename them. This will preserve backwards compatibility,
  # but the records will be somewhat bigger. I'll have to address what (if anything) to do with upgrades.

  #
  # Some fields (particularly AS and GEO fields) don't exist at this point in the pipeline, as they
  # are added during enrichment. In that case, I will make a note of it here and handle it in
  # ./pipelines/enrichment/20_enriched_to_ecs.conf:
  #
  # Autonomous System and Geo are handled after enrichment in 20_enriched_to_ecs.conf
  # 🗹 Autonomous System - Fields describing an Autonomous System (Internet routing prefix). - https://www.elastic.co/guide/en/ecs/current/ecs-as.html
  # 🗹 Geo - Fields describing a location. - https://www.elastic.co/guide/en/ecs/current/ecs-geo.html
  #
  # Risk/severity/priority/whatever will be done *after* enrichment based on normalized fields
  # - event.severity, event.risk_score and event.risk_score_norm
  #
  # TODO: certain other fields that I'm already normalizing for moloch could maybe be moved out of
  # here into enriched_to_ecs in the enrichment pipeline, but that kind of depends on what things
  # look like when we add more data sources in the future, or if moloch tackles ECS, etc.
  #

  # for now don't do anything unless an env explicitly enables it
  mutate {
    id => "mutate_add_field_env_logstash_zeek_to_ecs"
    add_field => { "[@metadata][ENV_LOGSTASH_ZEEK_TO_ECS]" => "${LOGSTASH_TO_ECS:false}" }
  }
  if ([@metadata][ENV_LOGSTASH_ZEEK_TO_ECS] == "true") {

    # I will mark these ☐ off with a 🗹 or 🗷 as I address them or decide they don't need addressing

    # 🗹 Network - Fields describing the communication path over which the event happened. - https://www.elastic.co/guide/en/ecs/current/ecs-network.html

    # network.direction handled during enrichment pipeline
    # network.name handled during enrichment pipeline
    # network.type handled during enrichment pipeline
    # TODO: some of these done here should probably be done after enrichment, too

    # network.application and network.protocol (TODO: what's the difference as far as my logs go?)
    if ([zeek][service]) {
      mutate { id => "mutate_add_field_ecs_network_application"
               add_field => { "[network][application]" => "%{[zeek][service]}" } }
      mutate { id => "mutate_add_field_ecs_network_protocol"
               add_field => { "[network][protocol]" => "%{[zeek][service]}" } }
    }

    # network.packets
    if ([totPackets]) { mutate { id => "mutate_add_field_ecs_network_packets"
                                 add_field => { "[network][packets]" => "%{[totPackets]}" } } }

    # network.bytes
    if ([totBytes]) { mutate { id => "mutate_add_field_ecs_network_bytes"
                               add_field => { "[network][bytes]" => "%{[totBytes]}" } } }

    # network.community_id
    if ([zeek][community_id]) { mutate { id => "mutate_add_field_ecs_network_community_id"
                                         add_field => { "[network][community_id]" => "%{[zeek][community_id]}" } } }

    # network.iana_number
    if ([ipProtocol]) { mutate { id => "mutate_add_field_ecs_network_iana_number"
                                 add_field => { "[network][iana_number]" => "%{[ipProtocol]}" } } }

    # network.transport
    if ([zeek][proto]) { mutate { id => "mutate_add_field_ecs_network_transport"
                                  add_field => { "[network][transport]" => "%{[zeek][proto]}" } } }

    # 🗹 Client - Fields about the client side of a network connection, used with server. - https://www.elastic.co/guide/en/ecs/current/ecs-client.html

    # client.ip / client.address
    if ([zeek][orig_h]) {
      mutate { id => "mutate_add_field_ecs_client_address"
               add_field => { "[client][address]" => "%{[zeek][orig_h]}" } }
      mutate { id => "mutate_add_field_ecs_client_ip"
               add_field => { "[client][ip]" => "%{[zeek][orig_h]}" } }
    }

    # client.port
    if ([zeek][orig_p]) { mutate { id => "mutate_add_field_ecs_client_port_orig_p"
                                   add_field => { "[client][port]" => "%{[zeek][orig_p]}" } } }

    # client.domain
    if ([zeek][orig_hostname]) { mutate { id => "mutate_add_field_ecs_client_domain_orig_hostname"
                                          add_field => { "[client][domain]" => "%{[zeek][orig_hostname]}" } } }
    else if ([zeek_dhcp][host_name]) { mutate { id => "mutate_add_field_ecs_client_domain_dhcp_host_name"
                                                add_field => { "[client][domain]" => "%{[zeek_dhcp][host_name]}" } } }
    else if ([zeek_dhcp][domain]) { mutate { id => "mutate_add_field_ecs_client_domain_dhcp_domain"
                                             add_field => { "[client][domain]" => "%{[zeek_dhcp][domain]}" } } }
    else if ([zeek_ntlm][host]) { mutate { id => "mutate_add_field_ecs_client_domain_ntlm_host_name"
                                           add_field => { "[client][domain]" => "%{[zeek_ntlm][host]}" } } }
    else if ([zeek_ntlm][domain]) { mutate { id => "mutate_add_field_ecs_client_domain_ntlm_domain"
                                             add_field => { "[client][domain]" => "%{[zeek_ntlm][domain]}" } } }

    # client.mac
    if ([zeek][orig_l2_addr]) { mutate { id => "mutate_add_field_ecs_client_mac_orig_l2_addr"
                                         add_field => { "[client][mac]" => "%{[zeek][orig_l2_addr]}" } } }
    else if ([zeek_dhcp][mac]) { mutate { id => "mutate_add_field_ecs_client_mac_dhcp_mac"
                                          add_field => { "[client][mac]" => "%{[zeek_dhcp][mac]}" } } }
    else if ([zeek_radius][mac]) { mutate { id => "mutate_add_field_ecs_client_mac_radius_mac"
                                            add_field => { "[client][mac]" => "%{[zeek_radius][mac]}" } } }

    # client.bytes
    if ([zeek_conn][orig_ip_bytes]) { mutate { id => "mutate_add_field_ecs_client_bytes_conn_orig_ip_bytes"
                                               add_field => { "[client][bytes]" => "%{[zeek_conn][orig_ip_bytes]}" } } }
    else if ([zeek_conn][orig_bytes]) { mutate { id => "mutate_add_field_ecs_client_bytes_conn_orig_bytes"
                                                 add_field => { "[client][bytes]" => "%{[zeek_conn][orig_bytes]}" } } }
    else if ([zeek_http][request_body_len]) { mutate { id => "mutate_add_field_ecs_client_bytes_http_request_body_len"
                                                       add_field => { "[client][bytes]" => "%{[zeek_http][request_body_len]}" } } }
    else if ([zeek_mqtt_publish][payload_len]) { mutate { id => "mutate_add_field_ecs_client_bytes_mqtt_publish_payload_len"
                                                          add_field => { "[client][bytes]" => "%{[zeek_mqtt_publish][payload_len]}" } } }
    else if ([zeek_sip][request_body_len]) { mutate { id => "mutate_add_field_ecs_client_bytes_sip_request_body_len"
                                                      add_field => { "[client][bytes]" => "%{[zeek_sip][request_body_len]}" } } }

    # client.packets
    if ([zeek_conn][orig_pkts]) { mutate { id => "mutate_add_field_ecs_client_packets_conn_orig_pkts"
                                           add_field => { "[client][packets]" => "%{[zeek_conn][orig_pkts]}" } } }

    # 🗹 Server - Fields about the server side of a network connection, used with client. - https://www.elastic.co/guide/en/ecs/current/ecs-server.html

    # server.ip / server.address
    if ([zeek][resp_h]) {
      mutate { id => "mutate_add_field_ecs_server_address"
               add_field => { "[server][address]" => "%{[zeek][resp_h]}" } }
      mutate { id => "mutate_add_field_ecs_server_ip"
               add_field => { "[server][ip]" => "%{[zeek][resp_h]}" } }
    }

    # server.port
    if ([zeek][resp_p]) { mutate { id => "mutate_add_field_ecs_server_port_resp_p"
                                   add_field => { "[server][port]" => "%{[zeek][resp_p]}" } } }

    # server.domain
    if ([zeek][resp_hostname]) { mutate { id => "mutate_add_field_ecs_server_domain_resp_hostname"
                                          add_field => { "[server][domain]" => "%{[zeek][resp_hostname]}" } } }

    # server.mac
    if ([zeek][resp_l2_addr]) { mutate { id => "mutate_add_field_ecs_server_mac_resp_l2_addr"
                                         add_field => { "[server][mac]" => "%{[zeek][resp_l2_addr]}" } } }

    # server.bytes
    if ([zeek_conn][resp_ip_bytes]) { mutate { id => "mutate_add_field_ecs_server_bytes_conn_resp_ip_bytes"
                                               add_field => { "[server][bytes]" => "%{[zeek_conn][resp_ip_bytes]}" } } }
    else if ([zeek_conn][resp_bytes]) { mutate { id => "mutate_add_field_ecs_server_bytes_conn_resp_bytes"
                                                 add_field => { "[server][bytes]" => "%{[zeek_conn][resp_bytes]}" } } }
    else if ([zeek_http][response_body_len]) { mutate { id => "mutate_add_field_ecs_server_bytes_http_response_body_len"
                                                        add_field => { "[server][bytes]" => "%{[zeek_http][response_body_len]}" } } }
    else if ([zeek_sip][response_body_len]) { mutate { id => "mutate_add_field_ecs_server_bytes_sip_response_body_len"
                                                       add_field => { "[server][bytes]" => "%{[zeek_sip][response_body_len]}" } } }

    # server.packets
    if ([zeek_conn][resp_pkts]) { mutate { id => "mutate_add_field_ecs_server_packets_conn_resp_pkts"
                                           add_field => { "[server][packets]" => "%{[zeek_conn][resp_pkts]}" } } }

    # ☐ Event - Fields breaking down the event details. - https://www.elastic.co/guide/en/ecs/current/ecs-event.html

    # event.action from zeek.action
    if ([zeek][action]) { mutate { id => "mutate_add_field_ecs_event_action"
                                   add_field => { "[event][action]" => "%{[zeek][action]}" } } }

    # event.dataset from zeek.logType
    mutate { id => "mutate_add_field_ecs_event_dataset"
             add_field => { "[event][dataset]" => "zeek.%{[zeek][logType]}" } }

    # event.duration
    if ([zeek_conn][duration]) {
      # convert duration (floating-point seconds) to nanoseconds
      ruby {
        id => "ruby_zeek_duration_to_ecs_event_duration"
        code => "event.set('[event][duration]', (1000000000 * event.get('[zeek_conn][duration]').to_f).round(0))"
      }
    }
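
    # NOTE: ECS expresses event.duration in nanoseconds, so e.g. a [zeek_conn][duration]
    # of "1.5" (seconds) becomes 1500000000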

    # for event.start/event.end, we'll use the moloch firstPacket/lastPacket fields as we already did the math
    if ([firstPacket]) { mutate { id => "mutate_add_field_ecs_event_start"
                                  add_field => { "[event][start]" => "%{[firstPacket]}" } } }
    if ([lastPacket]) { mutate { id => "mutate_add_field_ecs_event_end"
                                 add_field => { "[event][end]" => "%{[lastPacket]}" } } }

    # UIDs and FUIDs constitute unique IDs
    if ([zeek][uid]) { mutate { id => "mutate_add_field_ecs_id_uid"
                                merge => { "[event][id]" => "[zeek][uid]" } } }
    if ([zeek][fuid]) { mutate { id => "mutate_add_field_ecs_id_fuid"
                                 merge => { "[event][id]" => "[zeek][fuid]" } } }

    # event.provider
    if (![event][provider]) { mutate { id => "mutate_add_field_event_provider_zeek"
                                       add_field => { "[event][provider]" => "zeek" } } }

    # event.kind - https://www.elastic.co/guide/en/ecs/current/ecs-allowed-values-event-kind.html
    if ([zeek_notice]) or ([zeek_signatures]) or ([zeek_weird]) {
      mutate { id => "mutate_add_field_ecs_event_kind_alert"
               add_field => { "[event][kind]" => "alert" } }
    } else {
      mutate { id => "mutate_add_field_ecs_event_kind_event"
               add_field => { "[event][kind]" => "event" } }
    }

    # event.category - https://www.elastic.co/guide/en/ecs/current/ecs-allowed-values-event-category.html
    translate {
      id => "translate_zeek_ecs_event_category"
      field => "[zeek][logType]"
      destination => "[event][category]"
      dictionary_path => "/etc/zeek_log_ecs_categories.yaml"
    }
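
    # NOTE: the category dictionary keys off the Zeek log type; hypothetical example entries
    # for /etc/zeek_log_ecs_categories.yaml (the actual file's contents may differ):
    #   "conn": "network"
    #   "files": "file"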

    # TODO: this gets very granular and varies wildly per protocol; not sure I can translate these 100% from zeek.action and zeek.result
    # event.type - https://www.elastic.co/guide/en/ecs/current/ecs-allowed-values-event-type.html
    # event.outcome - https://www.elastic.co/guide/en/ecs/current/ecs-allowed-values-event-outcome.html
    # Eeesh, this is a swag...
    # if ([zeek][result]) {
    #   ruby {
    #     id => "ruby_ecs_event_outcome_zeek_result"
    #     code => "
    #       event.get('[zeek][result]').each { |zeekResult|
    #         zeekResult.downcase!
    #         if zeekResult =~ /(abo?rt|bad|busy|close|conflict|crit|declin|denied|deny|disabl|discon|down|err|exceed|exhaust|expir|fail|forbid|illeg|imposs|inappr|incorr|insuff|interrupt|misdirected|nak|no[ _-]*such|overload|problem|refus|reject|terminat|timeout|violat|wrong|(im|dis|mis|un|un|not)[ _-]*(avail|allow|assign|auth|deciph|process|permit|found|support|exist|enough|implem|known|ok|okay|reach|respond|consist|access|satis|succes|valid|want)|too[ _-]*(large|long|small|short|early|late|many|few))/
    #           event.set('[event][outcome]', 'failure')
    #           break
    #         elsif zeekResult =~ /(ok|okay|success|ack|complet|correct|good|ready|finish|valid)/
    #           event.set('[event][outcome]', 'success')
    #           break
    #         end
    #       }
    #     "
    #   }
    # }

    # ☐ DNS - Fields describing DNS queries and answers. - https://www.elastic.co/guide/en/ecs/current/ecs-dns.html
    if ([zeek_dns]) {

      # dns.resolved_ip
      if ([dns][ip]) { mutate { id => "mutate_merge_ecs_dns_resolved_ip"
                                merge => { "[dns][resolved_ip]" => "[dns][ip]" } } }

      # dns.answers and dns.type:answer
      if ([zeek_dns][answers]) {
        ruby {
          id => "ruby_zeek_dns_answers_to_ecs"
          code => '
            event.set("[dns][answers]", [Array(event.get("[zeek_dns][answers]")), Array(event.get("[zeek_dns][TTLs]"))].transpose.map{ |d| Hash[[:data, :ttl].zip(d)] })
          '
        }
        mutate { id => "mutate_add_field_ecs_dns_type_answer"
                 add_field => { "[dns][type]" => "answer" } }
      }
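
      # NOTE: the ruby filter above zips Zeek's parallel arrays into ECS-style answer objects
      # (transpose assumes the two arrays are the same length), e.g.
      #   answers ["10.0.0.1", "10.0.0.2"] + TTLs [300, 60] =>
      #   [dns][answers] = [{ data => "10.0.0.1", ttl => 300 }, { data => "10.0.0.2", ttl => 60 }]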

      # dns.op_code
      if ([dns][opcode]) { mutate { id => "mutate_add_field_ecs_dns_opcode"
                                    add_field => { "[dns][op_code]" => "%{[dns][opcode]}" } } }

      # dns.question.class
      if ([zeek_dns][qclass_name]) { mutate { id => "mutate_add_field_ecs_dns_qclass"
                                              add_field => { "[dns][question][class]" => "%{[zeek_dns][qclass_name]}" } } }

      # dns.question.type
      if ([zeek_dns][qtype_name]) { mutate { id => "mutate_add_field_ecs_dns_qtype"
                                             add_field => { "[dns][question][type]" => "%{[zeek_dns][qtype_name]}" } } }

      # dns.question.name and dns.type:query
      if ([zeek_dns][query]) {
        mutate { id => "mutate_add_field_ecs_dns_query"
                 add_field => { "[dns][question][name]" => "%{[zeek_dns][query]}" } }
        if (![dns][type]) { mutate { id => "mutate_add_field_ecs_dns_type_query"
                                     add_field => { "[dns][type]" => "query" } } }
      }

      if ([dns][type]) {
        # dns.header_flags
        if ([zeek][AA] == "T") { mutate { id => "mutate_add_field_ecs_dns_header_flag_aa"
                                          add_field => { "[dns][header_flags]" => "AA" } } }
        if ([zeek][TC] == "T") { mutate { id => "mutate_add_field_ecs_dns_header_flag_tc"
                                          add_field => { "[dns][header_flags]" => "TC" } } }
        if ([zeek][RD] == "T") { mutate { id => "mutate_add_field_ecs_dns_header_flag_rd"
                                          add_field => { "[dns][header_flags]" => "RD" } } }
        if ([zeek][RA] == "T") { mutate { id => "mutate_add_field_ecs_dns_header_flag_ra"
                                          add_field => { "[dns][header_flags]" => "RA" } } }
      }

      # dns.response_code
      if ([zeek_dns][rcode_name]) { mutate { id => "mutate_add_field_ecs_dns_response_code"
                                             add_field => { "[dns][response_code]" => "%{[zeek_dns][rcode_name]}" } } }

      # dns.id
      if ([zeek_dns][trans_id]) { mutate { id => "mutate_add_field_ecs_dns_id"
                                           add_field => { "[dns][id]" => "%{[zeek_dns][trans_id]}" } } }

      # TODO: domain stuff (dns.question.registered_domain, dns.question.subdomain, dns.question.top_level_domain)
      # perhaps use something like https://github.com/plutonbacon/logstash-filter-publicsuffix
    }

    # 🗹 File - Fields describing files. - https://www.elastic.co/guide/en/ecs/current/ecs-file.html

    if ([zeek_files]) {

      # file.type
      mutate { id => "mutate_add_field_ecs_file_type"
               add_field => { "[file][type]" => "file" } }

      # file.directory, file.name, file.path
      if ([zeek_files][filename]) {
        mutate { id => "mutate_add_field_ecs_file_path"
                 add_field => { "[file][path]" => "%{[zeek_files][filename]}" } }
        grok {
          id => "grok_zeek_files_filename_ecs"
          match => { "[zeek_files][filename]" => [ "%{GREEDYDATA:[file][directory]}[\\\/]%{GREEDYDATA:[file][name]}" ] }
        }
      }
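
      # NOTE: the grok above splits on the last path separator (\ or /), e.g.
      # "/tmp/extract/evil.exe" yields [file][directory] "/tmp/extract" and [file][name] "evil.exe"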

      # file.mime_type
      if ([zeek_files][mime_type]) { mutate { id => "mutate_add_field_ecs_files_mime_type"
                                              add_field => { "[file][mime_type]" => "%{[zeek_files][mime_type]}" } } }

      # file.size
      if ([zeek_files][total_bytes]) { mutate { id => "mutate_add_field_ecs_files_size"
                                                add_field => { "[file][size]" => "%{[zeek_files][total_bytes]}" } } }

      # 🗹 Hash - Hashes, usually file hashes. - https://www.elastic.co/guide/en/ecs/current/ecs-hash.html
      # file.hash.md5, sha1, sha256
      if ([zeek_files][md5]) { mutate { id => "mutate_add_field_ecs_files_hash_md5"
                                        add_field => { "[file][hash][md5]" => "%{[zeek_files][md5]}" } } }
      if ([zeek_files][sha1]) { mutate { id => "mutate_add_field_ecs_files_hash_sha1"
                                         add_field => { "[file][hash][sha1]" => "%{[zeek_files][sha1]}" } } }
      if ([zeek_files][sha256]) { mutate { id => "mutate_add_field_ecs_files_hash_sha256"
                                           add_field => { "[file][hash][sha256]" => "%{[zeek_files][sha256]}" } } }
    }

    if ([zeek_smb_files]) {

      # from smb_files: file.created, accessed, ctime, mtime, size
      if ([zeek_smb_files][times_created]) { mutate { id => "mutate_add_field_ecs_smb_created"
                                                      add_field => { "[file][created]" => "%{[zeek_smb_files][times_created]}" } } }

      if ([zeek_smb_files][times_accessed]) { mutate { id => "mutate_add_field_ecs_smb_accessed"
                                                       add_field => { "[file][accessed]" => "%{[zeek_smb_files][times_accessed]}" } } }

      if ([zeek_smb_files][times_changed]) { mutate { id => "mutate_add_field_ecs_smb_changed"
                                                      add_field => { "[file][ctime]" => "%{[zeek_smb_files][times_changed]}" } } }

      if ([zeek_smb_files][times_modified]) { mutate { id => "mutate_add_field_ecs_smb_modified"
                                                       add_field => { "[file][mtime]" => "%{[zeek_smb_files][times_modified]}" } } }

      if ([zeek_smb_files][size]) { mutate { id => "mutate_add_field_ecs_smb_size"
                                             add_field => { "[file][size]" => "%{[zeek_smb_files][size]}" } } }

      # file.name from smb_files.name
      if (![file][name]) and ([zeek_smb_files][name]) {
        mutate { id => "mutate_add_field_ecs_file_smb_files_name"
                 add_field => { "[file][name]" => "%{[zeek_smb_files][name]}" } }
      }
    }

    # file.directory from zeek_smb_files.smb_path
    if ([@metadata][smb_path]) {
      if (![file][type]) { mutate { id => "mutate_add_field_ecs_file_type_smb_path"
                                    add_field => { "[file][type]" => "file" } } }

      mutate { id => "mutate_add_field_ecs_file_directory_from_smb"
               add_field => { "[file][directory]" => "%{[@metadata][smb_path]}" } }
    }

    # file.path from file.directory and file.name, if present and not already populated
    if ([file][directory]) and (![file][path]) {
      if ([file][name]) {
        mutate { id => "mutate_add_field_ecs_path_from_dir_and_name"
                 add_field => { "[file][path]" => "%{[file][directory]}/%{[file][name]}" } }
      } else {
        mutate { id => "mutate_add_field_ecs_path_from_dir_only"
                 add_field => { "[file][path]" => "%{[file][directory]}" } }
      }
    }

    if ([file][name]) {
      if (![file][type]) { mutate { id => "mutate_add_field_ecs_file_type_name"
                                    add_field => { "[file][type]" => "file" } } }

      # file.extension
      grok {
        id => "grok_zeek_files_fileext_ecs"
        match => { "[file][name]" => [ "%{GREEDYDATA}\.%{GREEDYDATA:[file][extension]}" ] }
      }
    }
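
    # NOTE: the extension grok splits on the last dot, e.g. [file][name] "archive.tar.gz"
    # yields [file][extension] "gz"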

    # 🗹 HTTP - Fields describing an HTTP request. - https://www.elastic.co/guide/en/ecs/current/ecs-http.html
    if ([zeek_http]) {

      if ([zeek_http][request_body_len]) { mutate { id => "mutate_add_field_ecs_http_request_body_bytes"
                                                    add_field => { "[http][request][body][bytes]" => "%{[zeek_http][request_body_len]}" } } }

      if ([zeek_http][method]) { mutate { id => "mutate_add_field_ecs_http_request_method"
                                          add_field => { "[http][request][method]" => "%{[zeek_http][method]}" } } }

      if ([zeek_http][referrer]) { mutate { id => "mutate_add_field_ecs_http_request_referrer"
                                            add_field => { "[http][request][referrer]" => "%{[zeek_http][referrer]}" } } }

      if ([zeek_http][response_body_len]) { mutate { id => "mutate_add_field_ecs_http_response_body_bytes"
                                                     add_field => { "[http][response][body][bytes]" => "%{[zeek_http][response_body_len]}" } } }

      if ([zeek_http][status_code]) { mutate { id => "mutate_add_field_ecs_http_response_status_code"
                                               add_field => { "[http][response][status_code]" => "%{[zeek_http][status_code]}" } } }

      if ([zeek_http][version]) { mutate { id => "mutate_add_field_ecs_http_version"
                                           add_field => { "[http][version]" => "%{[zeek_http][version]}" } } }

      # ☐ URL - Fields that let you store URLs in various forms. - https://www.elastic.co/guide/en/ecs/current/ecs-url.html
      # todo: handle URIs from other protocols (SIP, FTP, ...)
      if ([zeek_http][uri]) or ([zeek_http][host]) {
        ruby {
          id => "ruby_ecs_uri_parse_from_zeek_http"
          init => "require 'uri'"
          code => "
            scheme = 'http'
            user = event.get('[zeek][user]')
            password = event.get('[zeek][password]')
            host = event.get('[zeek_http][host]')
            port = event.get('[zeek][resp_p]')
            uri = event.get('[zeek_http][uri]')
            ext = (uri.nil? || !(uri.include? '/')) ? nil : File.extname(uri).partition('.').last.split(/[\?#]/)[0]
            fragment = uri.nil? ? nil : uri.partition('#').last
            query = uri.nil? ? nil : uri.partition('?').last
            event.set('[url][scheme]', scheme)
            event.set('[url][original]', scheme + '://' + (host.nil? ? '' : host) + (uri.nil? ? '' : uri))
            event.set('[url][full]', scheme + '://' + (user.nil? ? '' : Array(user).first) + (password.nil? ? '' : ':' + password) + ((user.nil? && password.nil?) ? '' : '@') + (host.nil? ? '' : host) + (port.nil? ? '' : ':' + port) + (uri.nil? ? '' : uri))
            event.set('[url][domain]', host) unless host.nil?
            event.set('[url][extension]', ext) unless ext.nil? || ext.empty?
            event.set('[url][fragment]', fragment) unless fragment.nil? || fragment.empty?
            event.set('[url][password]', password) unless password.nil?
            event.set('[url][path]', uri) unless uri.nil?
            event.set('[url][port]', port) unless port.nil?
            event.set('[url][query]', query) unless query.nil? || query.empty?
            event.set('[url][user]', Array(user).first) unless user.nil?
          "
          # TODO: domain stuff (url.registered_domain, url.top_level_domain)
          # perhaps use something like https://github.com/plutonbacon/logstash-filter-publicsuffix
        }
      }
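
      # NOTE: given host "example.com" and uri "/docs/index.html?lang=en", the ruby block
      # above would set:
      #   [url][original]  = "http://example.com/docs/index.html?lang=en"
      #   [url][path]      = "/docs/index.html?lang=en"
      #   [url][query]     = "lang=en"
      #   [url][extension] = "html"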
    }

    # 🗹 Related - Fields meant to facilitate pivoting around a piece of data. - https://www.elastic.co/guide/en/ecs/current/ecs-related.html

    # related.user (zeek.user is already the array we want)
    if ([zeek][user]) { mutate { id => "mutate_merge_field_related_zeek_user"
                                 merge => { "[related][user]" => "[zeek][user]" } } }

    # related.hash (accumulate all hash/fingerprint fields into related.hash)
    if ([zeek_files][md5]) { mutate { id => "mutate_merge_field_related_hash_files_md5"
                                      merge => { "[related][hash]" => "[zeek_files][md5]" } } }
    if ([zeek_files][sha1]) { mutate { id => "mutate_merge_field_related_hash_files_sha1"
                                       merge => { "[related][hash]" => "[zeek_files][sha1]" } } }
    if ([zeek_files][sha256]) { mutate { id => "mutate_merge_field_related_hash_files_sha256"
                                         merge => { "[related][hash]" => "[zeek_files][sha256]" } } }
    if ([zeek_ssh][hassh]) { mutate { id => "mutate_merge_field_related_hash_ssh_hassh"
                                      merge => { "[related][hash]" => "[zeek_ssh][hassh]" } } }
    if ([zeek_ssh][hasshServer]) { mutate { id => "mutate_merge_field_related_hash_ssh_hasshServer"
                                            merge => { "[related][hash]" => "[zeek_ssh][hasshServer]" } } }
    if ([zeek_ssl][ja3]) { mutate { id => "mutate_merge_field_related_hash_ssl_ja3"
                                    merge => { "[related][hash]" => "[zeek_ssl][ja3]" } } }
    if ([zeek_ssl][ja3s]) { mutate { id => "mutate_merge_field_related_hash_zeek_ssl_ja3s"
                                     merge => { "[related][hash]" => "[zeek_ssl][ja3s]" } } }

    # related.ip (all IP-type fields get rolled up into related.ip)
    if ([zeek][destination_geo][ip]) { mutate { id => "mutate_merge_field_related_ip_zeek_destination_geo_ip"
                                                merge => { "[related][ip]" => "[zeek][destination_geo][ip]" } } }
    if ([zeek][orig_h]) { mutate { id => "mutate_merge_field_related_ip_zeek_orig_h"
                                   merge => { "[related][ip]" => "[zeek][orig_h]" } } }
    if ([zeek][resp_h]) { mutate { id => "mutate_merge_field_related_ip_zeek_resp_h"
                                   merge => { "[related][ip]" => "[zeek][resp_h]" } } }
    if ([zeek][source_geo][ip]) { mutate { id => "mutate_merge_field_related_ip_zeek_source_geo_ip"
                                           merge => { "[related][ip]" => "[zeek][source_geo][ip]" } } }
    if ([zeek_dhcp][assigned_ip]) { mutate { id => "mutate_merge_field_related_ip_zeek_dhcp_assigned_ip"
                                             merge => { "[related][ip]" => "[zeek_dhcp][assigned_ip]" } } }
    if ([zeek_dhcp][requested_ip]) { mutate { id => "mutate_merge_field_related_ip_zeek_dhcp_requested_ip"
                                              merge => { "[related][ip]" => "[zeek_dhcp][requested_ip]" } } }
    if ([zeek_enip_list_identity][device_ip]) { mutate { id => "mutate_merge_field_related_ip_zeek_enip_list_identity_device_ip"
                                                         merge => { "[related][ip]" => "[zeek_enip_list_identity][device_ip]" } } }
    if ([zeek_files][rx_hosts]) { mutate { id => "mutate_merge_field_related_ip_zeek_files_rx_hosts"
                                           merge => { "[related][ip]" => "[zeek_files][rx_hosts]" } } }
    if ([zeek_files][tx_hosts]) { mutate { id => "mutate_merge_field_related_ip_zeek_files_tx_hosts"
                                           merge => { "[related][ip]" => "[zeek_files][tx_hosts]" } } }
    if ([zeek_ftp][data_channel_orig_h]) { mutate { id => "mutate_merge_field_related_ip_zeek_ftp_data_channel_orig_h"
                                                    merge => { "[related][ip]" => "[zeek_ftp][data_channel_orig_h]" } } }
    if ([zeek_ftp][data_channel_resp_h]) { mutate { id => "mutate_merge_field_related_ip_zeek_ftp_data_channel_resp_h"
                                                    merge => { "[related][ip]" => "[zeek_ftp][data_channel_resp_h]" } } }
    if ([zeek_notice][dst]) { mutate { id => "mutate_merge_field_related_ip_zeek_notice_dst"
                                       merge => { "[related][ip]" => "[zeek_notice][dst]" } } }
    if ([zeek_notice][src]) { mutate { id => "mutate_merge_field_related_ip_zeek_notice_src"
                                       merge => { "[related][ip]" => "[zeek_notice][src]" } } }
    if ([zeek_radius][framed_addr]) { mutate { id => "mutate_merge_field_related_ip_zeek_radius_framed_addr"
                                               merge => { "[related][ip]" => "[zeek_radius][framed_addr]" } } }
    if ([zeek_smtp][path]) { mutate { id => "mutate_merge_field_related_ip_zeek_smtp_path"
                                      merge => { "[related][ip]" => "[zeek_smtp][path]" } } }
    if ([zeek_smtp][x_originating_ip]) { mutate { id => "mutate_merge_field_related_ip_zeek_smtp_x_originating_ip"
                                                  merge => { "[related][ip]" => "[zeek_smtp][x_originating_ip]" } } }
    if ([zeek_socks][bound_host]) { mutate { id => "mutate_merge_field_related_ip_zeek_socks_bound_host"
                                             merge => { "[related][ip]" => "[zeek_socks][bound_host]" } } }
    if ([zeek_socks][request_host]) { mutate { id => "mutate_merge_field_related_ip_zeek_socks_request_host"
                                               merge => { "[related][ip]" => "[zeek_socks][request_host]" } } }
    if ([zeek_x509][san_ip]) { mutate { id => "mutate_merge_field_related_ip_zeek_x509_san_ip"
                                        merge => { "[related][ip]" => "[zeek_x509][san_ip]" } } }
    if ([related][ip]) {
      ruby {
        id => "ruby_related_ip_uniq"
        code => "event.set('[related][ip]', event.get('[related][ip]').uniq)"
      }
    }

    # 🗹 Rule - Fields to capture details about rules used to generate alerts or other notable events. - https://www.elastic.co/guide/en/ecs/current/ecs-rule.html
    # - signatures
    #   - engine -> rule.author
    #   - signature_id -> rule.name
    #   - event_msg -> rule.description
    # - notice
    #   - category -> rule.category, rule.author (mapped), rule.reference (mapped), rule.license (mapped)
    #   - sub_category -> rule.name
    # - weird
    #   - name -> rule.name

    if ([zeek_signatures]) {
      if ([zeek_signatures][engine]) { mutate { id => "mutate_merge_field_ecs_rule_author_signatures_engine"
                                                merge => { "[rule][author]" => "[zeek_signatures][engine]" } } }

      if ([zeek_signatures][signature_id]) { mutate { id => "mutate_add_field_ecs_rule_id_signature_name"
                                                      merge => { "[rule][name]" => "[zeek_signatures][signature_id]" } } }

      if ([zeek_signatures][event_msg]) { mutate { id => "mutate_add_field_ecs_rule_id_signature_event_msg"
                                                   merge => { "[rule][description]" => "[zeek_signatures][event_msg]" } } }
    }

    if ([zeek_notice]) {

      mutate { id => "mutate_add_field_ecs_rule_ruleset_notice_zeek"
               add_field => { "[rule][ruleset]" => "Zeek Notices" } }

      if ([zeek_notice][category]) { mutate { id => "mutate_add_field_ecs_rule_category_notice_category"
                                              add_field => { "[rule][category]" => "%{[zeek_notice][category]}" } } }

      if ([zeek_notice][sub_category]) { mutate { id => "mutate_add_field_ecs_rule_category_notice_sub_category"
                                                  add_field => { "[rule][name]" => "%{[zeek_notice][sub_category]}" } } }

      translate {
        id => "translate_zeek_notice_author"
        field => "[zeek_notice][category]"
        destination => "[@metadata][zeek_noticed_mapped_author]"
        dictionary_path => "/etc/notice_authors.yaml"
        fallback => "Zeek"
      }
      if ([@metadata][zeek_noticed_mapped_author]) {
        mutate { id => "mutate_merge_zeek_noticed_mapped_author"
                 merge => { "[rule][author]" => "[@metadata][zeek_noticed_mapped_author]" } }
      }

      translate {
        id => "translate_zeek_notice_reference"
        field => "[zeek_notice][category]"
        destination => "[@metadata][zeek_noticed_mapped_reference]"
        dictionary_path => "/etc/notice_reference.yaml"
        fallback => "https://docs.zeek.org/en/current/zeek-noticeindex.html"
      }
      if ([@metadata][zeek_noticed_mapped_reference]) {
        mutate { id => "mutate_merge_zeek_noticed_mapped_reference"
                 merge => { "[rule][reference]" => "[@metadata][zeek_noticed_mapped_reference]" } }
      }

      translate {
        id => "translate_zeek_notice_license"
        field => "[zeek_notice][category]"
        destination => "[@metadata][zeek_noticed_mapped_license]"
        dictionary_path => "/etc/notice_license.yaml"
        fallback => "https://raw.githubusercontent.com/zeek/zeek/master/COPYING"
      }
      if ([@metadata][zeek_noticed_mapped_license]) {
        mutate { id => "mutate_merge_zeek_noticed_mapped_license"
                 merge => { "[rule][license]" => "[@metadata][zeek_noticed_mapped_license]" } }
      }

    }

    if ([zeek_weird][name]) {
      mutate { id => "mutate_add_field_ecs_rule_author_zeek_weird"
               add_field => { "[rule][author]" => "Zeek" } }
      mutate { id => "mutate_add_field_ecs_rule_ruleset_zeek_weird"
               add_field => { "[rule][ruleset]" => "Zeek Weird Logs" } }
      mutate { id => "mutate_add_field_ecs_rule_reference_zeek_weird"
               add_field => { "[rule][reference]" => "https://docs.zeek.org/en/current/scripts/base/frameworks/notice/weird.zeek.html" } }
      mutate { id => "mutate_add_field_ecs_rule_name_weird_name"
               add_field => { "[rule][name]" => "%{[zeek_weird][name]}" } }
    }

    # 🗹 Threat - Fields to classify events and alerts according to a threat taxonomy. - https://www.elastic.co/guide/en/ecs/current/ecs-threat.html
    if ([zeek_notice]) {

      if ([zeek_notice][category] == "ATTACK") {

        # populate threat information for MITRE ATT&CK notices from mitre-attack/bzar plugin
        mutate { id => "mutate_add_field_ecs_threat_framework_mitre_attack"
                 add_field => { "[threat][framework]" => "MITRE ATT&CK" } }

        if ([zeek_notice][sub_category]) {
          mutate { id => "mutate_add_field_ecs_threat_tactic_name_mitre"
                   add_field => { "[threat][tactic][name]" => "%{[zeek_notice][sub_category]}" } }
          mutate { id => "mutate_gsub_ecs_threat_tactic_name_notice_sub"
                   gsub => [ "[threat][tactic][name]", "_", " " ] }
          translate {
            id => "translate_zeek_mitre_attack_tactic_name_to_id"
            field => "[zeek_notice][sub_category]"
            destination => "[threat][tactic][id]"
            dictionary_path => "/etc/mitre_attack_tactic_ids.yaml"
          }
          translate {
            id => "translate_zeek_mitre_attack_tactic_name_to_reference"
            field => "[zeek_notice][sub_category]"
            destination => "[threat][tactic][reference]"
            dictionary_path => "/etc/mitre_attack_tactic_reference.yaml"
            fallback => "https://attack.mitre.org/tactics/enterprise/"
          }
        }

        if ([zeek_notice][sub]) and ([zeek_notice][sub] =~ /^T/) {
          # eg., T1077 Windows Admin Shares + T1105 Remote File Copy
          ruby {
            id => "ruby_ecs_threat_technique_from_attack"
            code => "
              idArray = Array.new
              nameArray = Array.new
              event.get('[zeek_notice][sub]').split('+').each do |technique|
                id, name = technique.strip.match(/(^T.*?)\s+(.+$)/).captures
                idArray.push(id) unless id.nil?
                nameArray.push(name) unless name.nil?
              end
              event.set('[threat][technique][id]', idArray)
              event.set('[threat][technique][name]', nameArray)
              event.set('[threat][technique][reference]', idArray.clone.map(&:clone).map{|x| x.prepend('https://attack.mitre.org/techniques/')})
            "
          }
        }
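
        # NOTE: for the example sub field above, "T1077 Windows Admin Shares + T1105 Remote File Copy",
        # the ruby block yields:
        #   [threat][technique][id]        = ["T1077", "T1105"]
        #   [threat][technique][name]      = ["Windows Admin Shares", "Remote File Copy"]
        #   [threat][technique][reference] = ["https://attack.mitre.org/techniques/T1077", "https://attack.mitre.org/techniques/T1105"]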

      } else if ([zeek_notice][category] == "EternalSafety") {
        # populate threat information for EternalSafety from 0xl3x1/zeek-EternalSafety plugin
        mutate { id => "mutate_add_field_ecs_threat_framework_eternal_safety"
                 add_field => { "[threat][framework]" => "EternalSafety" } }
        if ([zeek_notice][sub_category]) { mutate { id => "mutate_add_field_ecs_threat_technique_name_eternal"
                                                    add_field => { "[threat][technique][name]" => "%{[zeek_notice][sub_category]}" } } }
        if ([rule][reference]) { mutate { id => "mutate_add_field_ecs_threat_technique_reference_eternal"
                                          add_field => { "[threat][technique][reference]" => "%{[rule][reference]}" } } }
      }

    }

    # 🗹 TLS - Fields describing a TLS connection. - https://www.elastic.co/guide/en/ecs/current/ecs-tls.html
    if ([zeek_ssl]) {

      if ([zeek_ssl][ssl_version]) {
        # turn TLSv10, TLSv12, TLSv13, etc. into 'tls' and '1.0', '1.2', '1.3', etc.
        # TODO: tls.cipher already exists as an Arkime field, will this conflict/duplicate that?
        # EDIT: it won't duplicate it, but it will replace it. I guess that's okay for now.
        ruby {
          id => "ruby_ecs_ssl_version_parse"
          code => "
            verMatch = event.get('[zeek_ssl][ssl_version]').tr('.', '').match(/(.+)\s*[v-]\s*([\d\.]+)/i)
            verParts = verMatch.nil? ? nil : verMatch.captures
            unless verParts.nil?
              event.set('[tls][version_protocol]', verParts[0].downcase)
              event.set('[tls][version]', verParts[1].split(//).join('.'))
            end
          "
        }
      }
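
      # NOTE: e.g. [zeek_ssl][ssl_version] "TLSv12" parses into [tls][version_protocol] "tls"
      # and [tls][version] "1.2"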
|
||||
|
||||
if ([zeek_ssl][established]) { mutate { id => "mutate_add_field_ecs_zeek_tls_established"
|
||||
add_field => { "[tls][established]" => "%{[zeek_ssl][established]}" } } }
|
||||
|
||||
if ([zeek_ssl][resumed]) { mutate { id => "mutate_add_field_ecs_zeek_tls_resumed"
|
||||
add_field => { "[tls][resumed]" => "%{[zeek_ssl][resumed]}" } } }
|
||||
|
||||
if ([zeek_ssl][next_protocol]) {
|
||||
mutate { id => "mutate_add_field_ecs_zeek_tls_next_protocol"
|
||||
add_field => { "[tls][next_protocol]" => "%{[zeek_ssl][next_protocol]}" } }
|
||||
mutate { id => "mutate_lowercase_field_ecs_zeek_tls_next_protocol"
|
||||
lowercase => [ "[tls][next_protocol]" ] }
|
||||
}
|
||||
|
||||
# TODO: tls.cipher already exists as a Arkime field, will this conflict/duplicate that?
|
||||
# EDIT: apparently it does duplicate the value, so I'm commenting this out for now...
|
||||
#if ([zeek_ssl][cipher]) { mutate { id => "mutate_add_field_ecs_zeek_tls_cipher"
|
||||
# add_field => { "[tls][cipher]" => "%{[zeek_ssl][cipher]}" } } }
|
||||
|
||||
if ([zeek_ssl][curve]) { mutate { id => "mutate_add_field_ecs_zeek_tls_client_curve"
|
||||
add_field => { "[tls][curve]" => "%{[zeek_ssl][curve]}" } } }
|
||||
|
||||
if ([zeek_ssl][ja3]) { mutate { id => "mutate_add_field_ecs_zeek_tls_client_ja3"
|
||||
add_field => { "[tls][client][ja3]" => "%{[zeek_ssl][ja3]}" } } }

  if ([zeek_ssl][client_issuer_full]) { mutate { id => "mutate_add_field_ecs_zeek_tls_client_issuer_full"
                                                 add_field => { "[tls][client][issuer]" => "%{[zeek_ssl][client_issuer_full]}" } } }

  if ([zeek_ssl][client_subject_full]) { mutate { id => "mutate_add_field_ecs_zeek_tls_client_subject_full"
                                                  add_field => { "[tls][client][subject]" => "%{[zeek_ssl][client_subject_full]}" } } }

  if ([zeek_ssl][server_name]) {
    mutate { id => "mutate_add_field_ecs_zeek_tls_client_server_name"
             add_field => { "[tls][client][server_name]" => "%{[zeek_ssl][server_name]}" } }
    mutate { id => "mutate_add_field_ecs_zeek_tls_client_server_name_destination_domain"
             add_field => { "[destination][domain]" => "%{[zeek_ssl][server_name]}" } }
  }

  if ([zeek_ssl][issuer_full]) { mutate { id => "mutate_add_field_ecs_zeek_tls_issuer_full"
                                          add_field => { "[tls][server][issuer]" => "%{[zeek_ssl][issuer_full]}" } } }

  if ([zeek_ssl][ja3s]) { mutate { id => "mutate_add_field_ecs_zeek_tls_server_ja3s"
                                   add_field => { "[tls][server][ja3s]" => "%{[zeek_ssl][ja3s]}" } } }

  if ([zeek_ssl][subject_full]) { mutate { id => "mutate_add_field_ecs_zeek_tls_subject_full"
                                           add_field => { "[tls][server][subject]" => "%{[zeek_ssl][subject_full]}" } } }
}

# ☐ User agent - Fields to describe a browser user_agent string. - https://www.elastic.co/guide/en/ecs/current/ecs-user_agent.html
#   - TODO: potentially more parsing could be done for user agent strings (.name, .device.name, .version)

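# the protocol-specific user agent fields below are funneled through a single
# [@metadata][generic_user_agent] field before landing in [user_agent][original];
# e.g. a (hypothetical) [zeek_http][user_agent] of 'curl/7.64.0' ends up in [user_agent][original]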
if ([zeek_gquic][user_agent]) { mutate { id => "mutate_add_field_metadata_http_ecs_useragent_gquic"
                                         add_field => { "[@metadata][generic_user_agent]" => "%{[zeek_gquic][user_agent]}" } } }
if ([zeek_http][user_agent]) { mutate { id => "mutate_add_field_metadata_http_ecs_useragent_http"
                                        add_field => { "[@metadata][generic_user_agent]" => "%{[zeek_http][user_agent]}" } } }
if ([zeek_sip][user_agent]) { mutate { id => "mutate_add_field_metadata_http_ecs_useragent_sip"
                                       add_field => { "[@metadata][generic_user_agent]" => "%{[zeek_sip][user_agent]}" } } }
if ([zeek_smtp][user_agent]) { mutate { id => "mutate_add_field_metadata_http_ecs_useragent_smtp"
                                        add_field => { "[@metadata][generic_user_agent]" => "%{[zeek_smtp][user_agent]}" } } }
if ([@metadata][generic_user_agent]) {
  mutate { id => "mutate_add_field_ecs_user_agent_original_zeek"
           add_field => { "[user_agent][original]" => "%{[@metadata][generic_user_agent]}" } }
}

# ☐ Agent - Fields about the monitoring agent. - https://www.elastic.co/guide/en/ecs/current/ecs-agent.html
#   - agent will be set for logs coming from a sensor (hedgehog)
#   - double-check agent set for local Malcolm filebeat Zeek logs to ensure it's set correctly, too
# ☐ Observer - Fields describing an entity observing the event from outside the host. - https://www.elastic.co/guide/en/ecs/current/ecs-observer.html
#   - anything useful we could get here from either Malcolm or Hedgehog?
# ☐ Destination - Fields about the destination side of a network connection, used with source. - https://www.elastic.co/guide/en/ecs/current/ecs-destination.html
# ☐ Source - Fields about the source side of a network connection, used with destination. - https://www.elastic.co/guide/en/ecs/current/ecs-source.html
#   - I have client/server; do I need to do anything with this as well?
# ☐ Error - Fields about errors of any kind. - https://www.elastic.co/guide/en/ecs/current/ecs-error.html
#   - There could be a lot of cases where there are errors; do we lump them all in here? We'd need to identify
#     instances of error, error_msg, reply, status code, etc...
# ☐ User - Fields to describe the user relevant to the event. - https://www.elastic.co/guide/en/ecs/current/ecs-user.html
#   - a *lot* of the details ECS wants for the user (client, destination, email, domain, etc.) aren't provided by Zeek;
#     also, it appears that there is an issue with type mismatch between Arkime's "user" field and ECS "user.name", etc.
# ☐ Vulnerability - Fields to describe the vulnerability relevant to an event. - https://www.elastic.co/guide/en/ecs/current/ecs-vulnerability.html
#   - There are some CVE Zeek plugins; they may be mappable to this (?)
# ☐ VLAN - Fields to describe observed VLAN information. - https://www.elastic.co/guide/en/ecs/current/ecs-vlan.html
#   - conflicts with Arkime's VLAN field:
#     Can't merge a non object mapping [vlan] with an object mapping [vlan]", "caused_by"=>{"type"=>"illegal_argument_exception",
#     "reason"=>"Can't merge a non object mapping [vlan] with an object mapping [vlan]

# 🗷 Base - All fields defined directly at the top level - https://www.elastic.co/guide/en/ecs/current/ecs-base.html
# 🗷 Cloud - Fields about the cloud resource. - https://www.elastic.co/guide/en/ecs/current/ecs-cloud.html
# 🗷 Code Signature - These fields contain information about binary code signatures. - https://www.elastic.co/guide/en/ecs/current/ecs-code_signature.html
# 🗷 Container - Fields describing the container that generated this event. - https://www.elastic.co/guide/en/ecs/current/ecs-container.html
# 🗷 DLL - These fields contain information about code libraries dynamically loaded into processes. - https://www.elastic.co/guide/en/ecs/current/ecs-dll.html
# 🗷 ECS - Meta-information specific to ECS. - https://www.elastic.co/guide/en/ecs/current/ecs-ecs.html
# 🗷 Group - User's group relevant to the event. - https://www.elastic.co/guide/en/ecs/current/ecs-group.html
# 🗷 Host - Fields describing the relevant computing instance. - https://www.elastic.co/guide/en/ecs/current/ecs-host.html
# 🗷 Interface - Fields to describe observer interface information. - https://www.elastic.co/guide/en/ecs/current/ecs-interface.html
# 🗷 Log - Details about the event's logging mechanism. - https://www.elastic.co/guide/en/ecs/current/ecs-log.html
# 🗷 Operating System - OS fields contain information about the operating system. - https://www.elastic.co/guide/en/ecs/current/ecs-os.html
# 🗷 Organization - Fields describing the organization or company the event is associated with. - https://www.elastic.co/guide/en/ecs/current/ecs-organization.html
# 🗷 Package - These fields contain information about an installed software package. - https://www.elastic.co/guide/en/ecs/current/ecs-package.html
#   - I almost mapped "software" to this, but it doesn't really line up (installed packages vs. software observed in network traffic)
# 🗷 PE Header - These fields contain Windows Portable Executable (PE) metadata. - https://www.elastic.co/guide/en/ecs/current/ecs-pe.html
#   - You would think zeek_pe would line up here, but this is just header stuff specific to Windows executables and there's not much that lines up
# 🗷 Process - These fields contain information about a process. - https://www.elastic.co/guide/en/ecs/current/ecs-process.html
# 🗷 Registry - Fields related to Windows Registry operations. - https://www.elastic.co/guide/en/ecs/current/ecs-registry.html
# 🗷 Service - Fields describing the service for or from which the data was collected. - https://www.elastic.co/guide/en/ecs/current/ecs-service.html
# 🗷 Tracing - Fields related to distributed tracing. - https://www.elastic.co/guide/en/ecs/current/ecs-tracing.html

} # end if ENV_LOGSTASH_ZEEK_TO_ECS

}
@@ -0,0 +1,5 @@
output {
  pipeline {
    send_to => ["log-enrichment"]
  }
}
312
Vagrant/resources/malcolm/logstash/scripts/ip-to-segment-logstash.py
Executable file
@@ -0,0 +1,312 @@
#!/usr/bin/env python2
# -*- coding: utf-8 -*-

# Copyright (c) 2021 Battelle Energy Alliance, LLC. All rights reserved.

from __future__ import print_function

import sys
import os
import re
import argparse
import struct
import ipaddress
import itertools
import json
import pprint
import uuid
from collections import defaultdict

UNSPECIFIED_TAG = '<~<~<none>~>~>'
HOST_LIST_IDX = 0
SEGMENT_LIST_IDX = 1

JSON_MAP_TYPE_SEGMENT = 'segment'
JSON_MAP_TYPE_HOST = 'host'
JSON_MAP_KEY_ADDR = 'address'
JSON_MAP_KEY_NAME = 'name'
JSON_MAP_KEY_TAG = 'tag'
JSON_MAP_KEY_TYPE = 'type'

###################################################################################################
# print to stderr
def eprint(*args, **kwargs):
  print(*args, file=sys.stderr, **kwargs)

###################################################################################################
# recursively convert unicode strings to utf-8 strings
def byteify(input):
  if isinstance(input, dict):
    return {byteify(key): byteify(value)
            for key, value in input.iteritems()}
  elif isinstance(input, list):
    return [byteify(element) for element in input]
  elif isinstance(input, unicode):
    return input.encode('utf-8')
  else:
    return input

###################################################################################################
# main
def main():

  # extract arguments from the command line
  # print (sys.argv[1:]);
  parser = argparse.ArgumentParser(description='Logstash IP address to Segment Filter Creator', add_help=False, usage='ip-to-segment-logstash.py <arguments>')
  parser.add_argument('-m', '--mixed', dest='mixedInput', metavar='<STR>', type=str, nargs='*', default='', help='Input mixed JSON mapping file(s)')
  parser.add_argument('-s', '--segment', dest='segmentInput', metavar='<STR>', type=str, nargs='*', default='', help='Input segment mapping file(s)')
  parser.add_argument('-h', '--host', dest='hostInput', metavar='<STR>', type=str, nargs='*', default='', help='Input host mapping file(s)')
  parser.add_argument('-o', '--output', dest='output', metavar='<STR>', type=str, default='-', help='Output file')
  try:
    parser.error = parser.exit
    args = parser.parse_args()
  except SystemExit:
    parser.print_help()
    exit(2)

  # read each input file into its own list
  segmentLines = []
  hostLines = []
  mixedEntries = []

  for inFile in args.segmentInput:
    if os.path.isfile(inFile):
      segmentLines.extend([line.strip() for line in open(inFile)])

  for inFile in args.hostInput:
    if os.path.isfile(inFile):
      hostLines.extend([line.strip() for line in open(inFile)])

  for inFile in args.mixedInput:
    try:
      tmpMixedEntries = json.load(open(inFile, 'r'))
      if isinstance(tmpMixedEntries, list):
        mixedEntries.extend(byteify(tmpMixedEntries))
    except:
      pass

  # remove comments
  segmentLines = list(filter(lambda x: (len(x) > 0) and (not x.startswith('#')), segmentLines))
  hostLines = list(filter(lambda x: (len(x) > 0) and (not x.startswith('#')), hostLines))

  if (len(segmentLines) > 0) or (len(hostLines) > 0) or (len(mixedEntries) > 0):

    filterId = 0
    addedFields = set()

    outFile = open(args.output, 'w+') if (args.output and args.output != '-') else sys.stdout
    try:
      print('filter {', file=outFile)
      print("", file=outFile)
      print(" # this file was automatically generated by {}".format(os.path.basename(__file__)), file=outFile)
      print("", file=outFile)

      # process segment mappings into a dictionary of two dictionaries of lists (one for hosts, one for segments)
      # eg., tagListMap[required tag name][HOST_LIST_IDX|SEGMENT_LIST_IDX][network segment name] = [172.16.0.0/12, 192.168.0.0/24, 10.0.0.41]
      tagListMap = defaultdict(lambda: [defaultdict(list), defaultdict(list)])

      # handle segment mappings
      for line in segmentLines:
        # CIDR to network segment format:
        #   IP(s)|segment name|required tag
        #
        # where:
        #   IP(s): comma-separated list of CIDR-formatted network IP addresses
        #          eg., 10.0.0.0/8, 169.254.0.0/16, 172.16.10.41
        #
        #   segment name: segment name to be assigned when event IP address(es) match
        #
        #   required tag (optional): only check match and apply segment name if the event
        #                            contains this tag
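        # e.g. (a hypothetical line): 10.0.0.0/8,192.168.100.0/24|corporate|mytag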
        values = [x.strip() for x in line.split('|')]
        if len(values) >= 2:
          networkList = []
          for ip in ''.join(values[0].split()).split(','):
            try:
              networkList.append(str(ipaddress.ip_network(unicode(ip))).lower() if ('/' in ip) else str(ipaddress.ip_address(unicode(ip))).lower())
            except ValueError:
              eprint('"{}" is not a valid IP address, ignoring'.format(ip))
          segmentName = values[1]
          tagReq = values[2] if ((len(values) >= 3) and (len(values[2]) > 0)) else UNSPECIFIED_TAG
          if (len(networkList) > 0) and (len(segmentName) > 0):
            tagListMap[tagReq][SEGMENT_LIST_IDX][segmentName].extend(networkList)
          else:
            eprint('"{}" is not formatted correctly, ignoring'.format(line))
        else:
          eprint('"{}" is not formatted correctly, ignoring'.format(line))

      # handle hostname mappings
      macAddrRegex = re.compile(r'([a-fA-F0-9]{2}[:|\-]?){6}')
      for line in hostLines:
        # IP or MAC address to host name map:
        #   address|host name|required tag
        #
        # where:
        #   address: comma-separated list of IPv4, IPv6, or MAC addresses
        #            eg., 172.16.10.41, 02:42:45:dc:a2:96, 2001:0db8:85a3:0000:0000:8a2e:0370:7334
        #
        #   host name: host name to be assigned when event address(es) match
        #
        #   required tag (optional): only check match and apply host name if the event
        #                            contains this tag
        #
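        # e.g. (a hypothetical line): 02:42:45:dc:a2:96,172.16.10.41|webserver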
        values = [x.strip() for x in line.split('|')]
        if len(values) >= 2:
          addressList = []
          for addr in ''.join(values[0].split()).split(','):
            try:
              # see if it's an IP address
              addressList.append(str(ipaddress.ip_address(unicode(addr))).lower())
            except ValueError:
              # see if it's a MAC address
              if re.match(macAddrRegex, addr):
                # prepend _ temporarily to distinguish a mac address
                addressList.append("_{}".format(addr.replace('-', ':').lower()))
              else:
                eprint('"{}" is not a valid IP or MAC address, ignoring'.format(addr))
          hostName = values[1]
          tagReq = values[2] if ((len(values) >= 3) and (len(values[2]) > 0)) else UNSPECIFIED_TAG
          if (len(addressList) > 0) and (len(hostName) > 0):
            tagListMap[tagReq][HOST_LIST_IDX][hostName].extend(addressList)
          else:
            eprint('"{}" is not formatted correctly, ignoring'.format(line))
        else:
          eprint('"{}" is not formatted correctly, ignoring'.format(line))

      # handle mixed entries from the JSON-formatted file
      for entry in mixedEntries:

        # the entry must at least contain type, address, name; may optionally contain tag
        if (isinstance(entry, dict) and
            all(key in entry for key in (JSON_MAP_KEY_TYPE, JSON_MAP_KEY_NAME, JSON_MAP_KEY_ADDR)) and
            entry[JSON_MAP_KEY_TYPE] in (JSON_MAP_TYPE_SEGMENT, JSON_MAP_TYPE_HOST) and
            (len(entry[JSON_MAP_KEY_NAME]) > 0) and
            (len(entry[JSON_MAP_KEY_ADDR]) > 0)):

          addressList = []
          networkList = []

          tagReq = entry[JSON_MAP_KEY_TAG] if (JSON_MAP_KEY_TAG in entry) and (len(entry[JSON_MAP_KEY_TAG]) > 0) else UNSPECIFIED_TAG

          # account for comma-separated multiple addresses per 'address' value
          for addr in ''.join(entry[JSON_MAP_KEY_ADDR].split()).split(','):

            if (entry[JSON_MAP_KEY_TYPE] == JSON_MAP_TYPE_SEGMENT):
              # potentially interpret address as a CIDR-formatted subnet
              try:
                networkList.append(str(ipaddress.ip_network(unicode(addr))).lower() if ('/' in addr) else str(ipaddress.ip_address(unicode(addr))).lower())
              except ValueError:
                eprint('"{}" is not a valid IP address, ignoring'.format(addr))

            else:
              # should be an IP or MAC address
              try:
                # see if it's an IP address
                addressList.append(str(ipaddress.ip_address(unicode(addr))).lower())
              except ValueError:
                # see if it's a MAC address
                if re.match(macAddrRegex, addr):
                  # prepend _ temporarily to distinguish a mac address
                  addressList.append("_{}".format(addr.replace('-', ':').lower()))
                else:
                  eprint('"{}" is not a valid IP or MAC address, ignoring'.format(addr))

          if (len(networkList) > 0):
            tagListMap[tagReq][SEGMENT_LIST_IDX][entry[JSON_MAP_KEY_NAME]].extend(networkList)

          if (len(addressList) > 0):
            tagListMap[tagReq][HOST_LIST_IDX][entry[JSON_MAP_KEY_NAME]].extend(addressList)

      # go through the lists of segments/hosts, which will now be organized by required tag first, then
      # segment/host name, then the list of addresses
      for tag, nameMaps in tagListMap.iteritems():
        print("", file=outFile)

        # if a tag name is specified, print the IF statement verifying the tag's presence
        if tag != UNSPECIFIED_TAG:
          print(' if ("{}" in [tags]) {{'.format(tag), file=outFile)
        try:

          # for the host name(s) to be checked, create two filters, one for source IP|MAC and one for dest IP|MAC
          for hostName, addrList in nameMaps[HOST_LIST_IDX].iteritems():

            # ip addresses mapped to hostname
            ipList = list(set([a for a in addrList if not a.startswith('_')]))
            if (len(ipList) >= 1):
              for source in ['orig', 'resp']:
                filterId += 1
                fieldName = "{}_h".format(source)
                newFieldName = "{}_hostname".format(source)
                print("", file=outFile)
                print(' if ([zeek][{}]) and ({}) {{ '.format(fieldName, ' or '.join(['([zeek][{}] == "{}")'.format(fieldName, ip) for ip in ipList])), file=outFile)
                print(' mutate {{ id => "mutate_add_autogen_{}_ip_hostname_{}"'.format(source, filterId), file=outFile)
                print(' add_field => {{ "[zeek][{}]" => "{}" }}'.format(newFieldName, hostName), file=outFile)
                print(" }", file=outFile)
                print(" }", file=outFile)
                addedFields.add("[zeek][{}]".format(newFieldName))

            # mac addresses mapped to hostname
            macList = list(set([a for a in addrList if a.startswith('_')]))
            if (len(macList) >= 1):
              for source in ['orig', 'resp']:
                filterId += 1
                fieldName = "{}_l2_addr".format(source)
                newFieldName = "{}_hostname".format(source)
                print("", file=outFile)
                print(' if ([zeek][{}]) and ({}) {{ '.format(fieldName, ' or '.join(['([zeek][{}] == "{}")'.format(fieldName, mac[1:]) for mac in macList])), file=outFile)
                print(' mutate {{ id => "mutate_add_autogen_{}_mac_hostname_{}"'.format(source, filterId), file=outFile)
                print(' add_field => {{ "[zeek][{}]" => "{}" }}'.format(newFieldName, hostName), file=outFile)
                print(" }", file=outFile)
                print(" }", file=outFile)
                addedFields.add("[zeek][{}]".format(newFieldName))

          # for the segment(s) to be checked, create two cidr filters, one for source IP and one for dest IP
          for segmentName, ipList in nameMaps[SEGMENT_LIST_IDX].iteritems():
            ipList = list(set(ipList))
            for source in ['orig', 'resp']:
              filterId += 1
              # ip addresses/ranges mapped to network segment names
              fieldName = "{}_h".format(source)
              newFieldName = "{}_segment".format(source)
              print("", file=outFile)
              print(" if ([zeek][{}]) {{ cidr {{".format(fieldName), file=outFile)
              print(' id => "cidr_autogen_{}_segment_{}"'.format(source, filterId), file=outFile)
              print(' address => [ "%{{[zeek][{}]}}" ]'.format(fieldName), file=outFile)
              print(' network => [ {} ]'.format(', '.join('"{}"'.format(ip) for ip in ipList)), file=outFile)
              print(' add_tag => [ "{}" ]'.format(segmentName), file=outFile)
              print(' add_field => {{ "[zeek][{}]" => "{}" }}'.format(newFieldName, segmentName), file=outFile)
              print(" } }", file=outFile)
              addedFields.add("[zeek][{}]".format(newFieldName))
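              # a sketch of the generated filter, assuming a hypothetical 'corporate' segment of 10.0.0.0/8:
              #   if ([zeek][orig_h]) { cidr {
              #     id => "cidr_autogen_orig_segment_1"
              #     address => [ "%{[zeek][orig_h]}" ]
              #     network => [ "10.0.0.0/8" ]
              #     add_tag => [ "corporate" ]
              #     add_field => { "[zeek][orig_segment]" => "corporate" }
              #   } }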

        finally:
          # if a tag name is specified, close the IF statement verifying the tag's presence
          if tag != UNSPECIFIED_TAG:
            print("", file=outFile)
            print(' }} # end (if "{}" in [tags])'.format(tag), file=outFile)

    finally:
      # deduplicate any added fields
      if addedFields:
        print("", file=outFile)
        print(' # deduplicate any added fields', file=outFile)
        for field in list(itertools.product(['orig', 'resp'], ['hostname', 'segment'])):
          newFieldName = "[zeek][{}_{}]".format(field[0], field[1])
          if newFieldName in addedFields:
            print("", file=outFile)
            print(' if ({}) {{ '.format(newFieldName), file=outFile)
            print(' ruby {{ id => "ruby{}deduplicate"'.format(''.join(c for c, _ in itertools.groupby(re.sub('[^0-9a-zA-Z]+', '_', newFieldName)))), file=outFile)
            print(' code => "', file=outFile)
            print(" fieldVals = event.get('{}')".format(newFieldName), file=outFile)
            print(" if fieldVals.kind_of?(Array) then event.set('{}', fieldVals.uniq) end".format(newFieldName), file=outFile)
            print(' "', file=outFile)
            print(' } }', file=outFile)

      # close out filter with ending }
      print("", file=outFile)
      print('} # end Filter', file=outFile)

      if outFile is not sys.stdout:
        outFile.close()

if __name__ == '__main__':
  main()
156
Vagrant/resources/malcolm/logstash/scripts/ja3_build_list.py
Executable file
@@ -0,0 +1,156 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import print_function

import argparse
import datetime
import hashlib
import json
import os
import platform
import pprint
import re
import requests
import string
import sys
import yaml
from collections import defaultdict

###################################################################################################
debug = False
PY3 = (sys.version_info.major >= 3)
scriptName = os.path.basename(__file__)
scriptPath = os.path.dirname(os.path.realpath(__file__))
origPath = os.getcwd()

###################################################################################################
if not PY3:
  if hasattr(__builtins__, 'raw_input'): input = raw_input

try:
  FileNotFoundError
except NameError:
  FileNotFoundError = IOError

###################################################################################################
# print to stderr
def eprint(*args, **kwargs):
  print(*args, file=sys.stderr, **kwargs)

###################################################################################################
# convenient boolean argument parsing
def str2bool(v):
  if v.lower() in ('yes', 'true', 't', 'y', '1'):
    return True
  elif v.lower() in ('no', 'false', 'f', 'n', '0'):
    return False
  else:
    raise argparse.ArgumentTypeError('Boolean value expected.')

###################################################################################################
# main
def main():
  global debug

  parser = argparse.ArgumentParser(description=scriptName, add_help=False, usage='{} <arguments>'.format(scriptName))
  parser.add_argument('-v', '--verbose', dest='debug', type=str2bool, nargs='?', const=True, default=False, help="Verbose output")
  parser.add_argument('-o', '--output', required=True, dest='output', metavar='<STR>', type=str, default='', help='Output file')
  try:
    parser.error = parser.exit
    args = parser.parse_args()
  except SystemExit:
    parser.print_help()
    exit(2)

  debug = args.debug
  if debug:
    eprint(os.path.join(scriptPath, scriptName))
    eprint("Arguments: {}".format(sys.argv[1:]))
    eprint("Arguments: {}".format(args))
  else:
    sys.tracebacklimit = 0

  ja3Map = defaultdict(list)
  fingerprint = None

  urls = ['https://ja3er.com/getAllUasJson']
  for url in urls:
    try:
      for fingerprint in requests.get(url).json():
        if ('md5' in fingerprint) and fingerprint['md5'] and ('User-Agent' in fingerprint) and fingerprint['User-Agent']:
          ja3Map[fingerprint['md5']].append(fingerprint['User-Agent'].strip('"').strip("'"))
    except Exception as e:
      eprint('"{}" raised for "{}"'.format(str(e), fingerprint))

  try:
    url = 'https://raw.githubusercontent.com/LeeBrotherston/tls-fingerprinting/master/fingerprints/fingerprints.json'
    keys = ['record_tls_version', 'ciphersuite', 'extensions', 'e_curves', 'ec_point_fmt']
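    # each of these fingerprint values is a space-separated list of numbers (hex or decimal);
    # e.g. a record_tls_version of '0x0303' becomes '771', the per-key values are dash-joined,
    # and the MD5 of the comma-joined result is the JA3 hash used as the map key below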
    for fingerprint in [x for x in requests.get(url).text.splitlines() if (len(x) > 0) and (not x.startswith('#'))]:
      try:
        values = list()
        tmpMap = defaultdict(str)
        tmpMap.update(json.loads(fingerprint))
        for key in keys:
          values.append('-'.join([str(int(x, 0)) for x in tmpMap[key].split()]))
        if PY3:
          ja3Map[hashlib.md5(','.join(values).encode()).hexdigest()].extend(tmpMap['desc'].strip('"').strip("'").split(' / '))
        else:
          ja3Map[hashlib.md5(','.join(values)).hexdigest()].extend(tmpMap['desc'].strip('"').strip("'").split(' / '))
      except Exception as e:
        eprint('"{}" raised for "{}"'.format(str(e), fingerprint))
  except Exception as e:
    eprint('"{}" raised for "{}"'.format(str(e), fingerprint))

  urls = ['https://raw.githubusercontent.com/trisulnsm/ja3prints/master/ja3fingerprint.json']
  for url in urls:
    try:
      for fingerprint in [x for x in requests.get(url).text.splitlines() if (len(x) > 0) and (not x.startswith('#'))]:
        try:
          values = list()
          tmpMap = defaultdict(str)
          tmpMap.update(json.loads(fingerprint))
          ja3Map[tmpMap['ja3_hash'].strip()].append(tmpMap['desc'].strip('"').strip("'"))
        except Exception as e:
          eprint('"{}" raised for "{}"'.format(str(e), fingerprint))
    except Exception as e:
      eprint('"{}" raised for "{}"'.format(str(e), fingerprint))

  # this one has desc and ja3_hash backwards from the previous one
  urls = ['https://raw.githubusercontent.com/trisulnsm/ja3prints/master/newprints.json']
  for url in urls:
    try:
      for fingerprint in [x for x in requests.get(url).text.splitlines() if (len(x) > 0) and (not x.startswith('#'))]:
        try:
          values = list()
          tmpMap = defaultdict(str)
          tmpMap.update(json.loads(fingerprint))
          ja3Map[tmpMap['desc'].strip()].append(tmpMap['ja3_hash'].strip('"').strip("'"))
        except Exception as e:
          eprint('"{}" raised for "{}"'.format(str(e), fingerprint))
    except Exception as e:
      eprint('"{}" raised for "{}"'.format(str(e), fingerprint))

  # this one is csv (and overlaps the previous one a lot)
  try:
    url = 'https://raw.githubusercontent.com/salesforce/ja3/master/lists/osx-nix-ja3.csv'
    for fingerprint in [x for x in requests.get(url).text.splitlines() if (len(x) > 0) and (not x.startswith('#'))]:
      vals = ' '.join(fingerprint.split()).split(',', 1)
      if (len(vals) == 2) and (len(vals[0]) == 32):
        ja3Map[vals[0].strip()].append(vals[1].strip('"').strip("'"))
  except Exception as e:
    eprint('"{}" raised for "{}"'.format(str(e), fingerprint))

  finalMap = dict()
  for k, v in ja3Map.items():
    if (len(k) == 32) and all(c in string.hexdigits for c in k):
      finalMap[k] = list(set([element.strip('"').strip("'").strip() for element in v]))
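  # the YAML written below simply maps each 32-hex-digit JA3 MD5 to its deduplicated
  # list of client descriptions, roughly: '<md5>: [description, description, ...]'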

  with open(args.output, 'w+') as outfile:
    if PY3:
      yaml.dump(finalMap, outfile)
    else:
      yaml.safe_dump(finalMap, outfile, default_flow_style=False)

if __name__ == '__main__':
  main()
89
Vagrant/resources/malcolm/logstash/scripts/logstash-start.sh
Executable file
@@ -0,0 +1,89 @@
#!/usr/bin/env bash

# Copyright (c) 2021 Battelle Energy Alliance, LLC. All rights reserved.

set -e

# if any pipelines are volume-mounted inside this docker container, they should belong to subdirectories under this path
HOST_PIPELINES_DIR="/usr/share/logstash/malcolm-pipelines.available"

# runtime pipelines parent directory
export PIPELINES_DIR="/usr/share/logstash/malcolm-pipelines"

# runtime pipelines configuration file
export PIPELINES_CFG="/usr/share/logstash/config/pipelines.yml"

# for each pipeline in /usr/share/logstash/malcolm-pipelines, append the contents of this file to the dynamically-generated
# pipeline section in pipelines.yml (then delete 00_config.conf before starting)
export PIPELINE_EXTRA_CONF_FILE="00_config.conf"

# files defining IP->host and MAC->host mapping
INPUT_CIDR_MAP="/usr/share/logstash/config/cidr-map.txt"
INPUT_HOST_MAP="/usr/share/logstash/config/host-map.txt"
INPUT_MIXED_MAP="/usr/share/logstash/config/net-map.json"

# the name of the enrichment pipeline subdirectory under $PIPELINES_DIR
ENRICHMENT_PIPELINE=${LOGSTASH_ENRICHMENT_PIPELINE:-"enrichment"}

# the name of the pipeline(s) to which input will send logs for parsing (comma-separated list, no quotes)
PARSE_PIPELINE_ADDRESSES=${LOGSTASH_PARSE_PIPELINE_ADDRESSES:-"zeek-parse"}

# pipeline addresses for forwarding from Logstash to Elasticsearch (both "internal" and "external" pipelines)
export ELASTICSEARCH_PIPELINE_ADDRESS_INTERNAL=${LOGSTASH_ELASTICSEARCH_PIPELINE_ADDRESS_INTERNAL:-"internal-es"}
export ELASTICSEARCH_PIPELINE_ADDRESS_EXTERNAL=${LOGSTASH_ELASTICSEARCH_PIPELINE_ADDRESS_EXTERNAL:-"external-es"}
ELASTICSEARCH_OUTPUT_PIPELINE_ADDRESSES=${LOGSTASH_ELASTICSEARCH_OUTPUT_PIPELINE_ADDRESSES:-"$ELASTICSEARCH_PIPELINE_ADDRESS_INTERNAL,$ELASTICSEARCH_PIPELINE_ADDRESS_EXTERNAL"}

# ip-to-segment-logstash.py translates $INPUT_CIDR_MAP, $INPUT_HOST_MAP, and $INPUT_MIXED_MAP into this logstash filter file
NETWORK_MAP_OUTPUT_FILTER="$PIPELINES_DIR"/"$ENRICHMENT_PIPELINE"/16_host_segment_filters.conf

####################################################################################################################

# copy over pipeline filters from host-mapped volumes (if any) into their final resting places
find "$HOST_PIPELINES_DIR" -mindepth 1 -maxdepth 1 -type d -print0 2>/dev/null | sort -z | \
  xargs -0 -n 1 -I '{}' bash -c '
    PIPELINE_NAME="$(basename "{}")"
    PIPELINES_DEST_DIR="$PIPELINES_DIR"/"$PIPELINE_NAME"
    mkdir -p "$PIPELINES_DEST_DIR"
    cp -f "{}"/* "$PIPELINES_DEST_DIR"/
  '

# dynamically generate final pipelines.yml configuration file from all of the pipeline directories
> "$PIPELINES_CFG"
find "$PIPELINES_DIR" -mindepth 1 -maxdepth 1 -type d -print0 2>/dev/null | sort -z | \
  xargs -0 -n 1 -I '{}' bash -c '
    PIPELINE_NAME="$(basename "{}")"
    PIPELINE_ADDRESS_NAME="$(cat "{}"/*.conf | sed -e "s/:[\}]*.*\(}\)/\1/" | envsubst | grep -P "\baddress\s*=>" | awk "{print \$3}" | sed "s/[\"'']//g" | head -n 1)"
    if [[ -n "$ES_EXTERNAL_HOSTS" ]] || [[ "$PIPELINE_ADDRESS_NAME" != "$ELASTICSEARCH_PIPELINE_ADDRESS_EXTERNAL" ]]; then
      echo "- pipeline.id: malcolm-$PIPELINE_NAME" >> "$PIPELINES_CFG"
      echo "  path.config: "{}"" >> "$PIPELINES_CFG"
      cat "{}"/"$PIPELINE_EXTRA_CONF_FILE" 2>/dev/null >> "$PIPELINES_CFG"
      rm -f "{}"/"$PIPELINE_EXTRA_CONF_FILE"
      echo >> "$PIPELINES_CFG"
      echo >> "$PIPELINES_CFG"
    fi
  '
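# e.g. a (hypothetical) pipeline directory "zeek" would yield an entry roughly like:
#   - pipeline.id: malcolm-zeek
#     path.config: /usr/share/logstash/malcolm-pipelines/zeek
# plus whatever settings its 00_config.conf contributes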

# create filters for network segment and host mapping in the enrichment directory
rm -f "$NETWORK_MAP_OUTPUT_FILTER"
/usr/local/bin/ip-to-segment-logstash.py --mixed "$INPUT_MIXED_MAP" --segment "$INPUT_CIDR_MAP" --host "$INPUT_HOST_MAP" -o "$NETWORK_MAP_OUTPUT_FILTER"

if [[ -z "$ES_EXTERNAL_HOSTS" ]]; then
  # external ES host destination is not specified; remove the external destination from the enrichment pipeline output
  ELASTICSEARCH_OUTPUT_PIPELINE_ADDRESSES="$(echo "$ELASTICSEARCH_OUTPUT_PIPELINE_ADDRESSES" | sed "s/,[[:blank:]]*$ELASTICSEARCH_PIPELINE_ADDRESS_EXTERNAL//")"
fi

# insert quotes around the elasticsearch parsing and output pipeline list
MALCOLM_PARSE_PIPELINE_ADDRESSES=$(printf '"%s"\n' "${PARSE_PIPELINE_ADDRESSES//,/\",\"}")
MALCOLM_ELASTICSEARCH_OUTPUT_PIPELINES=$(printf '"%s"\n' "${ELASTICSEARCH_OUTPUT_PIPELINE_ADDRESSES//,/\",\"}")
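# e.g. a (hypothetical) PARSE_PIPELINE_ADDRESSES of 'zeek-parse,suricata-parse'
# becomes "zeek-parse","suricata-parse"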

# do a manual global replace on these particular values in the config files, as Logstash doesn't like environment variables with quotes in them
find "$PIPELINES_DIR" -type f -name "*.conf" -exec sed -i "s/_MALCOLM_ELASTICSEARCH_OUTPUT_PIPELINES_/${MALCOLM_ELASTICSEARCH_OUTPUT_PIPELINES}/g" "{}" \; 2>/dev/null
find "$PIPELINES_DIR" -type f -name "*.conf" -exec sed -i "s/_MALCOLM_PARSE_PIPELINE_ADDRESSES_/${MALCOLM_PARSE_PIPELINE_ADDRESSES}/g" "{}" \; 2>/dev/null

# import trusted CA certificates if necessary
/usr/local/bin/jdk-cacerts-auto-import.sh || true

# start logstash (adapted from docker-entrypoint)
env2yaml /usr/share/logstash/config/logstash.yml
export LS_JAVA_OPTS="-Dls.cgroup.cpuacct.path.override=/ -Dls.cgroup.cpu.path.override=/ $LS_JAVA_OPTS"
exec logstash
29
Vagrant/resources/malcolm/logstash/supervisord.conf
Normal file
@@ -0,0 +1,29 @@
; Copyright (c) 2021 Battelle Energy Alliance, LLC. All rights reserved.

[inet_http_server]
port=0.0.0.0:9001

[supervisord]
nodaemon=true
logfile=/dev/null
logfile_maxbytes=0
pidfile=/tmp/supervisord.pid

[rpcinterface:supervisor]
supervisor.rpcinterface_factory=supervisor.rpcinterface:make_main_rpcinterface

[supervisorctl]
serverurl=http://127.0.0.1:9001

[program:logstash]
command=/usr/local/bin/logstash-start.sh
autostart=true
startsecs=0
startretries=0
stopwaitsecs=60
stopasgroup=true
killasgroup=true
stopsignal=INT
stdout_logfile=/dev/fd/1
stdout_logfile_maxbytes=0
redirect_stderr=true