Commit f37e1b1e authored by Bozidar Proevski's avatar Bozidar Proevski Committed by root
Browse files

Added new components

Three new components were added:
- Cassandra DB
- TheHive
- Cortex (with embedded Elasticsearch v6)
parent 73a3933e
---
# Public hostname of the dslproxy front end; replaces the <CHANGE_ME> placeholder.
# (The duplicate placeholder line was removed — YAML duplicate keys silently
# resolve last-wins on most parsers.)
dslproxy: "dsoclab.gn4-3-wp8-soc.sunet.se"
# TheHive Button plugin
THEHIVE_URL: "https://hive.gn4-3-wp8-soc.sunet.se/"
......@@ -36,6 +36,21 @@ mysql_name: "dsoclab-mysql"
# MySQL container: name, image tag, and DB root password.
# NOTE(review): the plaintext passwords and secret keys below are committed to
# the repo — consider moving them to ansible-vault or an external secret store.
mysql_img: "{{repo}}/mysql:{{version}}{{suffix}}"
mysql_dbrootpass: "Pass006"
# Cassandra container (storage backend used by TheHive).
cassandra_name: "dsoclab-cassandra"
cassandra_img: "{{repo}}/cassandra:{{version}}{{suffix}}"
# TheHive container and its Play framework secret key.
thehive_name: "dsoclab-thehive"
thehive_img: "{{repo}}/thehive:{{version}}{{suffix}}"
# GENERATED WITH cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 64 | head -n 1
thehive_secret_key: "LcnI9eKLo33711BmCnzf6UM1y05pdmj3dlADL81PxuffWqhobRoiiGFftjNPKpmM"
# Cortex container, its embedded Elasticsearch heap size, and its secret key.
cortex_name: "dsoclab-cortex"
cortex_img: "{{repo}}/cortex:{{version}}{{suffix}}"
cortex_elasticsearch_mem: "256m"
# GENERATED WITH cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 64 | head -n 1
cortex_secret_key: "9CZ844IcAp5dHjsgU4iuaEssdopLcS6opzhVP3Ys4t4eRpNlHmwZdtfveLEXpM9D"
cortex_odfe_pass: "Pass009"
# Java keystore / truststore passwords used by the TLS setup.
kspass: "Testing003"
tspass: "Testing003"
......@@ -44,8 +59,8 @@ sysctlconfig:
- { key: "net.core.wmem_max", val: "2097152" }
- { key: "vm.max_map_count" , val: "524288" }
# JVM heap sizes for NiFi and OpenDistro Elasticsearch.
# The duplicated old values ("4g"/"2g") were removed — they were the pre-change
# diff lines and, as YAML duplicate keys, would be silently shadowed anyway.
nifi_javamem: "1g"
odfe_javamem: "512m"
nifi_version: 1.11.4
nifi_repo: "https://archive.apache.org/dist"
......@@ -53,13 +68,6 @@ nifi_repo: "https://archive.apache.org/dist"
ca_cn: "SOCTOOLS-CA"
soctools_users:
- firstname: "Arne"
lastname: "Oslebo"
username: "arne.oslebo"
email: "arne.oslebo@uninett.no"
DN: "CN=Arne Oslebo"
CN: "Arne Oslebo"
password: "Pass002"
- firstname: "Bozidar"
lastname: "Proevski"
username: "bozidar.proevski"
......@@ -67,6 +75,13 @@ soctools_users:
DN: "CN=Bozidar Proevski"
CN: "Bozidar Proevski"
password: "Pass001"
- firstname: "Arne"
lastname: "Oslebo"
username: "arne.oslebo"
email: "arne.oslebo@uninett.no"
DN: "CN=Arne Oslebo"
CN: "Arne Oslebo"
password: "Pass002"
odfees_img: "{{repo}}/odfees:{{version}}{{suffix}}"
odfekibana_img: "{{repo}}/odfekibana:{{version}}{{suffix}}"
......
---
# Build the Cassandra image: render the Dockerfile from its Jinja2 template,
# then docker-build it with the project's repo/version/suffix tag.
# (Indentation reconstructed — the pasted source had lost all leading whitespace.)
- name: Configure the cassandra Dockerfile
  template:
    src: cassandra/Dockerfile.j2
    dest: "{{role_path}}/files/cassandraDockerfile"

- name: Build cassandra image
  command: docker build -t {{repo}}/cassandra:{{version}}{{suffix}} -f {{role_path}}/files/cassandraDockerfile {{role_path}}/files
......@@ -5,86 +5,96 @@
name: "{{repo}}/centos:{{version}}{{suffix}}"
register: centosimg
- name: Assert CentOS image
assert:
that: centosimg.images | length == 0
fail_msg: "CentOS image already exists"
#- name: Skip if image exists
# meta: end_play
# when: centosimg.images | length != 0
- name: Create etc tree in build directory
file:
path: '{{ temp_root}}/{{ item.path }}'
state: directory
mode: '{{ item.mode }}'
with_filetree: templates/etcroot/
when: item.state == 'directory'
# tags:
# - start
- name: Populate etc tree in build directory
template:
src: '{{ item.src }}'
dest: '{{ temp_root}}/{{ item.path }}'
force: yes
with_filetree: templates/etcroot
when: item.state == 'file'
- name: Create dev tree in build directory
command: mknod -m {{ item.mode }} {{ item.dev }} {{ item.type }} {{ item.major }} {{ item.minor }}
args:
creates: "{{ item.dev }}"
with_items:
- { mode: 600, dev: "{{temp_root}}/dev/console", type: c, major: 5, minor: 1 }
- { mode: 600, dev: "{{temp_root}}/dev/initctl", type: p, major: '', minor: '' }
- { mode: 666, dev: "{{temp_root}}/dev/full", type: c, major: 1, minor: 7 }
- { mode: 666, dev: "{{temp_root}}/dev/null", type: c, major: 1, minor: 3 }
- { mode: 666, dev: "{{temp_root}}/dev/ptmx", type: c, major: 5, minor: 2 }
- { mode: 666, dev: "{{temp_root}}/dev/random", type: c, major: 1, minor: 8 }
- { mode: 666, dev: "{{temp_root}}/dev/tty", type: c, major: 5, minor: 0 }
- { mode: 666, dev: "{{temp_root}}/dev/tty0", type: c, major: 4, minor: 0 }
- { mode: 666, dev: "{{temp_root}}/dev/urandom", type: c, major: 1, minor: 9 }
- { mode: 666, dev: "{{temp_root}}/dev/zero", type: c, major: 1, minor: 5 }
- name: Install centos-release in build directory
yum:
installroot: "{{ temp_root}}"
name: centos-release
state: present
- name: Install Core CentOS in build directory
yum:
installroot: "{{ temp_root}}"
name:
- "@Core"
- yum-plugin-ovl.noarch
- epel-release
state: present
- name: Clean yum cache
command: 'yum --installroot="{{ temp_root}}" -y clean all'
- name: Remove unneeded directories
file:
path: "{{temp_root}}/{{item}}"
state: absent
with_items:
- usr/share/cracklib
- var/cache/yum
- sbin/sln
- etc/ld.so.cache
- var/cache/ldconfig
- usr/share/backgrounds
- name: Create needed directories
file:
path: "{{temp_root}}/{{item}}"
state: directory
with_items:
- var/cache/yum
- var/cache/ldconfig
- name: Import image in docker
shell: tar --numeric-owner -c -C {{temp_root }} . | docker import - {{repo}}/centos:{{version}}{{suffix}}
- name: Remove temp directory
file:
path: "{{temp_root}}"
state: absent
#- name: Assert CentOS image
#  assert:
#    that: centosimg.images | length == 0
#    fail_msg: "CentOS image already exists"

# Build the base CentOS image from a chroot tree and import it into docker.
# Runs only when the image is absent (centosimg is registered by an earlier
# docker_image_info task above this hunk).
# (Indentation reconstructed — the pasted source had lost all leading whitespace.)
- name: Build CentOS image
  when: centosimg.images | length == 0
  block:
    - name: Create etc tree in build directory
      file:
        path: '{{ temp_root}}/{{ item.path }}'
        state: directory
        mode: '{{ item.mode }}'
      with_filetree: templates/etcroot/
      when: item.state == 'directory'

    - name: Populate etc tree in build directory
      template:
        src: '{{ item.src }}'
        dest: '{{ temp_root}}/{{ item.path }}'
        force: yes
      with_filetree: templates/etcroot
      when: item.state == 'file'

    # Device nodes cannot be made by the file module; use mknod, kept
    # idempotent via the "creates" guard.
    - name: Create dev tree in build directory
      command: mknod -m {{ item.mode }} {{ item.dev }} {{ item.type }} {{ item.major }} {{ item.minor }}
      args:
        creates: "{{ item.dev }}"
      with_items:
        - { mode: 600, dev: "{{temp_root}}/dev/console", type: c, major: 5, minor: 1 }
        - { mode: 600, dev: "{{temp_root}}/dev/initctl", type: p, major: '', minor: '' }
        - { mode: 666, dev: "{{temp_root}}/dev/full", type: c, major: 1, minor: 7 }
        - { mode: 666, dev: "{{temp_root}}/dev/null", type: c, major: 1, minor: 3 }
        - { mode: 666, dev: "{{temp_root}}/dev/ptmx", type: c, major: 5, minor: 2 }
        - { mode: 666, dev: "{{temp_root}}/dev/random", type: c, major: 1, minor: 8 }
        - { mode: 666, dev: "{{temp_root}}/dev/tty", type: c, major: 5, minor: 0 }
        - { mode: 666, dev: "{{temp_root}}/dev/tty0", type: c, major: 4, minor: 0 }
        - { mode: 666, dev: "{{temp_root}}/dev/urandom", type: c, major: 1, minor: 9 }
        - { mode: 666, dev: "{{temp_root}}/dev/zero", type: c, major: 1, minor: 5 }

    - name: Install centos-release in build directory
      yum:
        installroot: "{{ temp_root}}"
        name: centos-release
        state: present

    - name: Install Core CentOS in build directory
      yum:
        installroot: "{{ temp_root}}"
        name:
          - "@Core"
          - yum-plugin-ovl.noarch
          - epel-release
        state: present

    - name: Clean yum cache
      command: 'yum --installroot="{{ temp_root}}" -y clean all'

    - name: Remove unneeded directories
      file:
        path: "{{temp_root}}/{{item}}"
        state: absent
      with_items:
        - usr/share/cracklib
        - var/cache/yum
        - sbin/sln
        - etc/ld.so.cache
        - var/cache/ldconfig
        - usr/share/backgrounds

    - name: Create needed directories
      file:
        path: "{{temp_root}}/{{item}}"
        state: directory
      with_items:
        - var/cache/yum
        - var/cache/ldconfig

    # Stream the chroot through tar into "docker import" to create the image.
    - name: Import image in docker
      shell: tar --numeric-owner -c -C {{temp_root }} . | docker import - {{repo}}/centos:{{version}}{{suffix}}

    - name: Remove temp directory
      file:
        path: "{{temp_root}}"
        state: absent
---
# Build the Cortex image: render the Dockerfile from its Jinja2 template,
# then docker-build it with the project's repo/version/suffix tag.
# (Indentation reconstructed — the pasted source had lost all leading whitespace.)
- name: Configure the Cortex Dockerfile
  template:
    src: cortex/Dockerfile.j2
    dest: "{{role_path}}/files/cortexDockerfile"

- name: Build the Cortex image
  command: docker build -t {{repo}}/cortex:{{version}}{{suffix}} -f {{role_path}}/files/cortexDockerfile {{role_path}}/files
......@@ -15,3 +15,6 @@
- include: odfekibana.yml
- include: keycloak.yml
- include: misp.yml
- include: cassandra.yml
- include: thehive.yml
- include: cortex.yml
---
# Build the TheHive image: render the Dockerfile from its Jinja2 template,
# then docker-build it with the project's repo/version/suffix tag.
# (Indentation reconstructed — the pasted source had lost all leading whitespace.)
- name: Configure theHive Dockerfile
  template:
    src: thehive/Dockerfile.j2
    dest: "{{role_path}}/files/thehiveDockerfile"

- name: Build theHive image
  command: docker build -t {{repo}}/thehive:{{version}}{{suffix}} -f {{role_path}}/files/thehiveDockerfile {{role_path}}/files
# Cassandra image (Jinja2-templated), layered on the project's openjdk base.
FROM {{repo}}/openjdk:{{version}}{{suffix}}
USER root
# These files used to be COPY'd in; they are now generated inline by the RUN
# layer below, so the build context stays minimal.
#COPY cassandra.repo /etc/yum.repos.d/cassandra.repo
#COPY supervisord.conf /etc/supervisord.conf
#COPY start.sh /start.sh
# Single RUN layer: write the Apache Cassandra yum repo and the /start.sh
# launcher, install cassandra + supervisor, relocate the config under
# /usr/share/cassandra (owned by the unprivileged user), then clean the cache.
RUN echo "[cassandra]" > /etc/yum.repos.d/cassandra.repo && \
echo "name=Apache Cassandra" >> /etc/yum.repos.d/cassandra.repo && \
echo "baseurl=https://downloads.apache.org/cassandra/redhat/311x/" >> /etc/yum.repos.d/cassandra.repo && \
echo "gpgcheck=1" >> /etc/yum.repos.d/cassandra.repo && \
echo "repo_gpgcheck=1" >> /etc/yum.repos.d/cassandra.repo && \
echo "gpgkey=https://downloads.apache.org/cassandra/KEYS" >> /etc/yum.repos.d/cassandra.repo && \
echo '#!/bin/bash' > /start.sh && \
echo 'export CASSANDRA_HOME=/usr/share/cassandra' >> /start.sh && \
echo 'export CASSANDRA_CONF=$CASSANDRA_HOME/conf' >> /start.sh && \
echo 'export CASSANDRA_INCLUDE=$CASSANDRA_HOME/cassandra.in.sh' >> /start.sh && \
echo 'log_file=/var/log/cassandra/cassandra.log' >> /start.sh && \
echo 'pid_file=/var/run/cassandra/cassandra.pid' >> /start.sh && \
echo 'lock_file=/var/lock/subsys/cassandra' >> /start.sh && \
echo 'CASSANDRA_PROG=/usr/sbin/cassandra' >> /start.sh && \
echo '' >> /start.sh && \
echo '$CASSANDRA_PROG -p $pid_file > $log_file 2>&1' >> /start.sh && \
yum install -y epel-release && \
yum install -y cassandra supervisor && \
mkdir /usr/share/cassandra/conf && \
cp -a /etc/cassandra/conf/* /usr/share/cassandra/conf && \
chown -R cassandra:cassandra /usr/share/cassandra && \
chown -R cassandra:cassandra /var/lib/cassandra && \
sed -i -e 's,/etc/cassandra,/usr/share/cassandra,g' /usr/share/cassandra/cassandra.in.sh && \
chmod a+x /start.sh && \
yum -y clean all
# 7000 = inter-node cluster traffic, 9042 = CQL native clients.
EXPOSE 7000 9042
#ENTRYPOINT ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"]
# Run unprivileged; the entrypoint is intentionally left to the orchestration
# layer (both candidate ENTRYPOINTs are kept commented above/below).
USER cassandra
# ENTRYPOINT ["/start.sh"]
# Yum repository definition for Apache Cassandra 3.11.x (RPM packages),
# with both package and repodata GPG verification enabled.
[cassandra]
name=Apache Cassandra
baseurl=https://downloads.apache.org/cassandra/redhat/311x/
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://downloads.apache.org/cassandra/KEYS
#!/bin/bash
# Foreground launcher for Cassandra inside the container.
export CASSANDRA_HOME=/usr/share/cassandra
export CASSANDRA_CONF=$CASSANDRA_HOME/conf
# cassandra.in.sh is sourced by the cassandra wrapper via CASSANDRA_INCLUDE.
export CASSANDRA_INCLUDE=$CASSANDRA_HOME/cassandra.in.sh
log_file=/var/log/cassandra/cassandra.log
pid_file=/var/run/cassandra/cassandra.pid
# NOTE(review): lock_file is set but not used by this script — kept for parity
# with the packaged init script; verify before removing.
lock_file=/var/lock/subsys/cassandra
CASSANDRA_PROG=/usr/sbin/cassandra
# Start cassandra with an explicit pid file, sending all output to the log.
$CASSANDRA_PROG -p $pid_file > $log_file 2>&1
; Supervisord configuration for running Cassandra in the foreground.
[supervisord]
loglevel=debug
nodaemon=true

; Cassandra runs as its own user from CASSANDRA_HOME with stderr folded
; into the stdout log file.
[program:cassandra]
user=cassandra
directory=/usr/share/cassandra
stdout_logfile=/var/log/cassandra/cassandra.log
redirect_stderr=true
; NOTE(review): supervisord does not expand $CASSANDRA_HOME in environment
; values (only %(ENV_X)s expansion is supported) — CASSANDRA_INCLUDE is
; likely passed literally; verify against the running container.
environment=CASSANDRA_HOME="/usr/share/cassandra",CASSANDRA_CONF="/usr/share/cassandra/conf",CASSANDRA_INCLUDE="$CASSANDRA_HOME/cassandra.in.sh"
command=/usr/sbin/cassandra -p /var/run/cassandra/cassandra.pid
# Cortex image (Jinja2-templated) with an embedded Elasticsearch 6 OSS node.
FROM {{repo}}/openjdk:{{version}}{{suffix}}
USER root
# These files used to be COPY'd in; the repo file is now generated inline.
#COPY thehive.repo /etc/yum.repos.d/thehive.repo
#COPY supervisord.conf /etc/supervisord.conf
#COPY start.sh /start.sh
# NOTE(review): the repo file is named thehive.repo but is used to install the
# cortex package — rpm.thehive-project.org appears to host both; verify.
# Single RUN layer: write the repo, import GPG keys, install Cortex plus the
# analyzer toolchain, install Elasticsearch OSS 6.8.13 from the Elastic RPM,
# clone and pip-install the Cortex-Analyzers requirements, then clean up.
RUN echo "[thehive-project]" > /etc/yum.repos.d/thehive.repo && \
echo "enabled=1" >> /etc/yum.repos.d/thehive.repo && \
echo "priority=1" >> /etc/yum.repos.d/thehive.repo && \
echo "name=TheHive-Project RPM repository" >> /etc/yum.repos.d/thehive.repo && \
echo "baseurl=http://rpm.thehive-project.org/stable/noarch" >> /etc/yum.repos.d/thehive.repo && \
echo "gpgcheck=1" >> /etc/yum.repos.d/thehive.repo && \
yum install -y epel-release && \
rpm --import https://raw.githubusercontent.com/TheHive-Project/TheHive/master/PGP-PUBLIC-KEY && \
rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch && \
yum install -y cortex supervisor daemonize vim net-tools telnet htop python3-pip.noarch git gcc python3-devel.x86_64 ssdeep-devel.x86_64 python3-wheel.noarch libexif-devel.x86_64 libexif.x86_64 perl-Image-ExifTool.noarch gcc-c++ whois && \
rpm -Uvh https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-6.8.13.rpm && \
chown -R elasticsearch:elasticsearch /etc/elasticsearch && \
mkdir -p /home/cortex && \
chown -R cortex:cortex /home/cortex && \
chown -R cortex:cortex /etc/cortex && \
cd /opt && \
git clone https://github.com/TheHive-Project/Cortex-Analyzers && \
chown -R cortex:cortex /opt/Cortex-Analyzers && \
cd /opt/Cortex-Analyzers && \
for I in analyzers/*/requirements.txt; do LC_ALL=en_US.UTF-8 pip3 install --no-cache-dir -U -r $I || true; done && \
for I in responders/*/requirements.txt; do LC_ALL=en_US.UTF-8 pip3 install --no-cache-dir -U -r $I || true; done && \
yum -y clean all
# Cortex web UI / API port.
EXPOSE 9001
#ENTRYPOINT ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"]
# Run unprivileged; the entrypoint is intentionally left to the orchestration
# layer (both candidate ENTRYPOINTs are kept commented).
USER cortex
# ENTRYPOINT ["/start.sh"]
# Sample Cortex application.conf file
## SECRET KEY
#
# The secret key is used to secure cryptographic functions.
#
# IMPORTANT: If you deploy your application to several instances, make
# sure to use the same key.
# # # # # #
# GENERATED WITH cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 64 | head -n 1
# # # # # #
play.http.secret.key="9CZ844IcAp5dHjsgU4iuaEssdopLcS6opzhVP3Ys4t4eRpNlHmwZdtfveLEXpM9D"
## ElasticSearch
search {
# Name of the index
index = cortex3
# ElasticSearch instance address.
# For cluster, join address:port with ',': "http://ip1:9200,ip2:9200,ip3:9200"
uri = "http://dsoclab-elastic:9200"
## Advanced configuration
# Scroll keepalive.
#keepalive = 1m
# Scroll page size.
#pagesize = 50
# Number of shards
#nbshards = 5
# Number of replicas
#nbreplicas = 1
# Arbitrary settings
#settings {
# # Maximum number of nested fields
# mapping.nested_fields.limit = 100
#}
## Authentication configuration
#search.username = ""
#search.password = ""
## SSL configuration
#search.keyStore {
# path = "/path/to/keystore"
# type = "JKS" # or PKCS12
# password = "keystore-password"
#}
#search.trustStore {
# path = "/path/to/trustStore"
# type = "JKS" # or PKCS12
# password = "trustStore-password"
#}
}
## Cache
#
# If an analyzer is executed against the same observable, the previous report can be returned without re-executing the
# analyzer. The cache is used only if the second job occurs within cache.job (the default is 10 minutes).
cache.job = 10 minutes
## Authentication
auth {
# "provider" parameter contains the authentication provider(s). It can be multi-valued, which is useful
# for migration.
# The available auth types are:
# - services.LocalAuthSrv : passwords are stored in the user entity within ElasticSearch). No
# configuration are required.
# - ad : use ActiveDirectory to authenticate users. The associated configuration shall be done in
# the "ad" section below.
# - ldap : use LDAP to authenticate users. The associated configuration shall be done in the
# "ldap" section below.
provider = [local]
ad {
# The Windows domain name in DNS format. This parameter is required if you do not use
# 'serverNames' below.
#domainFQDN = "mydomain.local"
# Optionally you can specify the host names of the domain controllers instead of using 'domainFQDN
# above. If this parameter is not set, TheHive uses 'domainFQDN'.
#serverNames = [ad1.mydomain.local, ad2.mydomain.local]
# The Windows domain name using short format. This parameter is required.
#domainName = "MYDOMAIN"
# If 'true', use SSL to connect to the domain controller.
#useSSL = true
}
ldap {
# The LDAP server name or address. The port can be specified using the 'host:port'
# syntax. This parameter is required if you don't use 'serverNames' below.
#serverName = "ldap.mydomain.local:389"
# If you have multiple LDAP servers, use the multi-valued setting 'serverNames' instead.
#serverNames = [ldap1.mydomain.local, ldap2.mydomain.local]
# Account to use to bind to the LDAP server. This parameter is required.
#bindDN = "cn=thehive,ou=services,dc=mydomain,dc=local"
# Password of the binding account. This parameter is required.
#bindPW = "***secret*password***"
# Base DN to search users. This parameter is required.
#baseDN = "ou=users,dc=mydomain,dc=local"
# Filter to search user in the directory server. Please note that {0} is replaced
# by the actual user name. This parameter is required.
#filter = "(cn={0})"
# If 'true', use SSL to connect to the LDAP directory server.
#useSSL = true
}
}
## ANALYZERS
#
analyzer {
# analyzer location
# url can be point to:
# - directory where analyzers are installed
# - json file containing the list of analyzer descriptions
urls = [
#"https://dl.bintray.com/thehive-project/cortexneurons/analyzers.json"
"/opt/Cortex-Analyzers/analyzers"
]
# Sane defaults. Do not change unless you know what you are doing.
fork-join-executor {
# Min number of threads available for analysis.
parallelism-min = 2
# Parallelism (threads) ... ceil(available processors * factor).
parallelism-factor = 2.0
# Max number of threads available for analysis.
parallelism-max = 4
}
}
# RESPONDERS
#
responder {
# responder location (same format as analyzer.urls)
urls = [
#"https://dl.bintray.com/thehive-project/cortexneurons/reponders.json"
"/opt/Cortex-Analyzers/responders"
]
# Sane defaults. Do not change unless you know what you are doing.
fork-join-executor {
# Min number of threads available for analysis.
parallelism-min = 2
# Parallelism (threads) ... ceil(available processors * factor).
parallelism-factor = 2.0
# Max number of threads available for analysis.
parallelism-max = 4
}
}
# It's the end my friend. Happy hunting!
# Yum repository definition for TheHive-Project stable RPM packages.
# The signing key is imported separately with rpm --import (no gpgkey= here).
[thehive-project]
enabled=1
priority=1
name=TheHive-Project RPM repository
baseurl=http://rpm.thehive-project.org/stable/noarch
gpgcheck=1
# Base image comes from the templated project registry; the duplicate
# hard-coded "gn43-dsl/centos" FROM line (pre-change diff artifact) was removed.
FROM {{repo}}/centos:{{version}}{{suffix}}
ENV HAPROXY_VERSION 2.2.3
ENV HAPROXY_URL https://www.haproxy.org/download/2.2/src/haproxy-2.2.3.tar.gz
......
# TheHive 4 image (Jinja2-templated), layered on the project's openjdk base.
FROM {{repo}}/openjdk:{{version}}{{suffix}}
USER root
# These files used to be COPY'd in; the repo file is now generated inline.
#COPY thehive.repo /etc/yum.repos.d/thehive.repo
#COPY supervisord.conf /etc/supervisord.conf
#COPY start.sh /start.sh
# Single RUN layer: write the TheHive-Project repo, import its GPG key,
# install thehive4 + tooling, prepare the data/home directories owned by the
# unprivileged thehive user, then clean the yum cache.
RUN echo "[thehive-project]" > /etc/yum.repos.d/thehive.repo && \
echo "enabled=1" >> /etc/yum.repos.d/thehive.repo && \
echo "priority=1" >> /etc/yum.repos.d/thehive.repo && \
echo "name=TheHive-Project RPM repository" >> /etc/yum.repos.d/thehive.repo && \
echo "baseurl=http://rpm.thehive-project.org/stable/noarch" >> /etc/yum.repos.d/thehive.repo && \
echo "gpgcheck=1" >> /etc/yum.repos.d/thehive.repo && \
yum install -y epel-release && \
rpm --import https://raw.githubusercontent.com/TheHive-Project/TheHive/master/PGP-PUBLIC-KEY && \
yum install -y thehive4 supervisor daemonize vim net-tools telnet htop && \
mkdir -p /opt/thp_data/files/thehive && \
chown -R thehive:thehive /opt/thp_data/files/thehive && \
mkdir -p /home/thehive && \
chown -R thehive:thehive /home/thehive /etc/thehive && \
yum -y clean all
# TheHive web UI / API port.
EXPOSE 9000
#ENTRYPOINT ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"]
# Run unprivileged; the entrypoint is intentionally left to the orchestration
# layer (both candidate ENTRYPOINTs are kept commented).
USER thehive
# ENTRYPOINT ["/start.sh"]
#!/bin/bash
# Foreground launcher for Cassandra inside the container.
export CASSANDRA_HOME=/usr/share/cassandra
export CASSANDRA_CONF=$CASSANDRA_HOME/conf
# cassandra.in.sh is sourced by the cassandra wrapper via CASSANDRA_INCLUDE.
export CASSANDRA_INCLUDE=$CASSANDRA_HOME/cassandra.in.sh
log_file=/var/log/cassandra/cassandra.log
pid_file=/var/run/cassandra/cassandra.pid
# NOTE(review): lock_file is set but not used by this script — kept for parity
# with the packaged init script; verify before removing.
lock_file=/var/lock/subsys/cassandra
CASSANDRA_PROG=/usr/sbin/cassandra
# Start cassandra with an explicit pid file, sending all output to the log.
$CASSANDRA_PROG -p $pid_file > $log_file 2>&1
; Supervisord configuration for running Cassandra in the foreground.
[supervisord]
loglevel=debug
nodaemon=true

; Cassandra runs as its own user from CASSANDRA_HOME with stderr folded
; into the stdout log file.
[program:cassandra]
user=cassandra
directory=/usr/share/cassandra
stdout_logfile=/var/log/cassandra/cassandra.log
redirect_stderr=true
; NOTE(review): supervisord does not expand $CASSANDRA_HOME in environment
; values (only %(ENV_X)s expansion is supported) — CASSANDRA_INCLUDE is
; likely passed literally; verify against the running container.
environment=CASSANDRA_HOME="/usr/share/cassandra",CASSANDRA_CONF="/usr/share/cassandra/conf",CASSANDRA_INCLUDE="$CASSANDRA_HOME/cassandra.in.sh"