Commit 98dce030f8
47 changed files with 817 additions and 0 deletions
execute  (Executable file, 172 lines)
@@ -0,0 +1,172 @@
#!/usr/bin/env bash
set -e;

# Interesting chars:
# 🛰 - Satellite - looks very cool indeed in Firefox

lantern_path="../lantern-build-engine";
if [[ -z "${do_parallel}" ]]; then
    do_parallel="true";
fi

###############################################################################

# Make sure the current directory is the location of this script to simplify matters
cd "$(dirname "$(readlink -f "$0")")";

# Check out the lantern git submodule if needed
if [ ! -f "${lantern_path}/lantern.sh" ]; then git submodule update --init "${lantern_path}"; fi

#shellcheck disable=SC1090
source "${lantern_path}/lantern.sh";

###############################################################################

if [[ -z "${2}" ]]; then
    echo "Usage:";
    echo "    ./execute {{path/to/jobfile}} {{path/to/hostsfile_a}} {{path/to/hostsfile_n}}";
    exit 1;
fi
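# Example invocation (illustrative, using the job and host group shipped in this commit):
#     ./execute test workers
# Bare names are resolved below to jobs/test/test.job and hosts/workers.txt respectively.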

job_filepath="$1";
if [[ ! -f "${job_filepath}" ]]; then
    job_filepath="jobs/${job_filepath}/${job_filepath}.job";
fi

shift;

hosts="";
while [[ ! -z "${1}" ]]; do
    hosts_filename="${1}";
    if [[ ! -f "${hosts_filename}" ]]; then
        hosts_filename="hosts/${hosts_filename}.txt";
    fi

    hosts="$(echo "${hosts}" && cat "${hosts_filename}")";
    shift;
done

ssh_configfile="./ssh-config";

if [[ ! -x "${job_filepath}" ]]; then
    echo -e "${FRED}Error: ${job_filepath} doesn't exist, or is not executable.${RS}" >&2;
    exit 1;
fi

log_msg() {
    echo -e "[ $(date -u +"%Y-%m-%dT%H:%M:%SZ") ] $*" >&2;
}

# $1 Command to execute
RUN_ONCE() {
    command="${1}";
    hash="$(echo "${command}" | sha256sum | cut -d- -f1)";

    if [[ -z "${EXECUTE_HOSTNAME}" ]]; then
        task_end 1 "Oops! The hostname to execute on wasn't found. This is probably a bug.";
    fi

    log_msg "🛡️ ${HC}[ ${EXECUTE_HOSTNAME} ]${RS} ${FBLE}RUN_ONCE${RS} ${command}";

    if [[ "${INTERACTIVE}" == "true" ]]; then
        echo "Enabling interactive mode";
        SSH_FLAGS="-t ";
    fi

    echo "
        if [[ ! -d /etc/remote-exec ]]; then sudo mkdir /etc/remote-exec; fi
        if [[ ! -f /etc/remote-exec/${hash} ]]; then
            echo \"[ \${HOSTNAME} ] [ \$(date -u +\"%Y-%m-%dT%H:%M:%SZ\") ] Executing once\"
            ${command}
            sudo touch /etc/remote-exec/${hash};
        fi
    " | ssh ${SSH_FLAGS} -F "${ssh_configfile}" "${EXECUTE_HOSTNAME}" bash;

}

# $1 Command to execute
RUN() {
    command="${1}";

    if [[ -z "${EXECUTE_HOSTNAME}" ]]; then
        task_end 1 "Oops! The hostname to execute on wasn't found. This is probably a bug.";
    fi

    log_msg "🚏 ${HC}[ ${EXECUTE_HOSTNAME} ]${RS} ${FBLE}RUN${RS} ${command}";

    if [[ "${INTERACTIVE}" == "true" ]]; then
        echo "Enabling interactive mode";
        SSH_FLAGS="-t ";
    fi

    echo "${command}" | ssh ${SSH_FLAGS} -F "${ssh_configfile}" "${EXECUTE_HOSTNAME}" bash;
}

# $1 Filepath to script to copy & execute
SCRIPT() {
    script_filepath="${1}";

    remote_filepath="/tmp/$(basename "${script_filepath}")";

    if [[ -z "${EXECUTE_HOSTNAME}" ]]; then
        task_end 1 "Oops! The hostname to execute on wasn't found. This is probably a bug.";
    fi

    log_msg "🚂 ${HC}[ ${EXECUTE_HOSTNAME} ]${RS} ${FMAG}SCRIPT${RS} ${script_filepath}";

    scp -F "${ssh_configfile}" "${script_filepath}" "${EXECUTE_HOSTNAME}:${remote_filepath}";
    echo -e "${remote_filepath}\nrm ${remote_filepath}" | ssh -F "${ssh_configfile}" "${EXECUTE_HOSTNAME}" bash;
    echo ""
}

# $1 The filepath to copy
# $2 The location to copy it to on the remote host
COPY() {
    filepath_local="${1}";
    filepath_remote="${2}";

    log_msg "🚚 ${HC}[ ${EXECUTE_HOSTNAME} ]${RS} ${FMAG}COPY${RS} ${filepath_local} → ${filepath_remote}";

    if [[ ! -f "${filepath_local}" ]]; then
        task_end 1 "[ ${HC}COPY${RS} ] Error: Couldn't find ${filepath_local} on disk (CWD is ${PWD}).";
    fi

    if [[ -z "${EXECUTE_HOSTNAME}" ]]; then
        task_end 1 "[ ${HC}COPY${RS} ] Oops! The hostname to copy to wasn't found. This is probably a bug.";
    fi


    scp -F "${ssh_configfile}" "${filepath_local}" "${EXECUTE_HOSTNAME}:${filepath_remote}";
}
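
# The four primitives above (RUN, RUN_ONCE, SCRIPT, COPY) form the whole API available to job
# files, which are plain bash sourced once per host by __do_execute below. A minimal sketch of
# a job file using them (illustrative only; the real examples live under jobs/):
#
#     #!/usr/bin/env bash
#     RUN "sudo apt-get update";
#     RUN_ONCE "sudo apt-get install --yes some-package";       # hypothetical package name
#     COPY "${JOBFILE_DIR}/example.conf" "/tmp/example.conf";   # hypothetical file
#     SCRIPT "${JOBFILE_DIR}/setup.sh";                         # hypothetical script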

# $1 job filepath
# $2 hostname
__do_execute() {
    job_filepath="${1}";
    hostname="${2}";
    log_msg "${FBLE}${HC}⌛${RS} Starting on ${HC}${hostname}${RS}";

    export EXECUTE_HOSTNAME="${hostname}";
    export JOBFILE_DIR; JOBFILE_DIR="$(dirname "${job_filepath}")";

    #shellcheck disable=SC1090
    source "${job_filepath}";

    log_msg "${FGRN}${HC}✔${RS} Finished on ${HC}${hostname}${RS}";
}

task_begin "Executing ${job_filepath}";

while read -r hostname; do
    if [[ -z "${hostname}" ]]; then
        continue;
    fi

    if [[ "${do_parallel}" == "true" ]]; then
        __do_execute "${job_filepath}" "${hostname}" &
    else
        __do_execute "${job_filepath}" "${hostname}";
    fi
done < <(echo "${hosts}");
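# With do_parallel=true each host's job was backgrounded above; the wait below collects them
# all before the script exits.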

wait
hosts/clients-consul.txt  (Normal file, 2 lines)
@@ -0,0 +1,2 @@
cluster-balusha
cluster-silverleaf
hosts/clients-nomad.txt  (Normal file, 2 lines)
@@ -0,0 +1,2 @@
cluster-sycadil
cluster-spatterling
hosts/controller.txt  (Normal file, 1 line)
@@ -0,0 +1 @@
cluster-wopplefox
hosts/servers-consul.txt  (Normal file, 2 lines)
@@ -0,0 +1,2 @@
cluster-sycadil
cluster-spatterling
hosts/servers-nomad.txt  (Normal file, 2 lines)
@@ -0,0 +1,2 @@
cluster-balusha
cluster-silverleaf
hosts/servers-vault.txt  (Normal file, 1 line)
@@ -0,0 +1 @@
cluster-balusha
hosts/workers.txt  (Normal file, 4 lines)
@@ -0,0 +1,4 @@
cluster-sycadil
cluster-spatterling
cluster-balusha
cluster-silverleaf
jobs/apt-cache/apt-cache.job  (Executable file, 8 lines)
@@ -0,0 +1,8 @@
#!/usr/bin/env bash

RUN "echo 'Acquire::http { Proxy \"http://172.16.230.100:3142\"; }' | sudo tee /etc/apt/apt.conf.d/proxy"

# Use http instead of https to allow caching
RUN "echo 'deb http://archive.raspberrypi.org/debian/ buster main ui' | sudo tee /etc/apt/sources.list.d/raspi.list";

RUN_ONCE "sudo rm /etc/apt/sources.list.d/raspi";
jobs/avahi-daemon/avahi-daemon.job  (Executable file, 3 lines)
@@ -0,0 +1,3 @@
#!/usr/bin/env bash

RUN "sudo apt install avahi-daemon"
jobs/bin-folder/bin-folder.job  (Executable file, 6 lines)
@@ -0,0 +1,6 @@
#!/usr/bin/env bash

RUN "sudo apt-get update";
RUN "sudo apt-get install git";
RUN "git clone https://gitlab.com/sbrl/bin.git"
RUN "cd \${HOME}/bin && GPG_SIGN_COMMITS=n ./envsetup";
jobs/collectd/collectd.conf  (Normal file, 59 lines)
@@ -0,0 +1,59 @@
Interval 300

LoadPlugin load
LoadPlugin thermal
LoadPlugin memory
LoadPlugin disk
LoadPlugin swap
LoadPlugin uptime
#LoadPlugin smart

LoadPlugin df
LoadPlugin irq
LoadPlugin contextswitch
LoadPlugin interface
LoadPlugin ping
#LoadPlugin nginx
#LoadPlugin ntpd

LoadPlugin exec

LoadPlugin processes

LoadPlugin network

<Plugin df>
    MountPoint "/"
    MountPoint "/boot"
</Plugin>

<Plugin interface>
    Interface "eth0"
    IgnoreSelected false
</Plugin>

<Plugin ping>
    Host "elessar.mooncarrot.space"
    Host "wopplefox.mooncarrot.space"
    Host "ubuntu.mirrors.ovh.net"
    #Host "s3-eu-west-1.amazonaws.com"
    #Host "github.com"
    #Host "api.backblazeb2.com"
</Plugin>

#<Plugin ntpd>
#    Host "localhost"
#    Port "123"
#</Plugin>

<Plugin processes>
    CollectMemoryMaps false
</Plugin>

<Plugin "network">
    <Server "5.198.44.45">
        SecurityLevel "Encrypt"
        Username "cluster"
        Password "{{{PASSWORD}}}"
    </Server>
</Plugin>
jobs/collectd/collectd.job  (Executable file, 25 lines)
@@ -0,0 +1,25 @@
#!/usr/bin/env bash

if [[ -z "${COLLECTD_PW}" ]]; then
    echo "Error: COLLECTD_PW environment variable is not set";
    exit 1;
fi

RUN "sudo apt-get install -y collectd liboping0";

tmpfile_collectd="$(mktemp --tmpdir "collectd-XXXXXXX")";

chmod 0600 "${tmpfile_collectd}";
sed -e "s/{{{PASSWORD}}}/${COLLECTD_PW}/g" <"${JOBFILE_DIR}/collectd.conf" >"${tmpfile_collectd}";
unset COLLECTD_PW;

COPY "${tmpfile_collectd}" "/tmp/collectd.conf";

rm "${tmpfile_collectd}";

RUN "sudo chown root:root /tmp/collectd.conf";
RUN "sudo chmod 0600 /tmp/collectd.conf";
RUN "sudo mv /tmp/collectd.conf /etc/collectd/collectd.conf";

RUN "sudo systemctl restart collectd.service";
RUN "sudo systemctl enable collectd.service";
jobs/config-git/config-git.job  (Executable file, 8 lines)
@@ -0,0 +1,8 @@
#!/usr/bin/env bash

RUN "sudo apt-get install --yes git openssh-client";

RUN 'ssh -T git@git.starbeamrainbowlabs.com || ssh-keyscan -H git.starbeamrainbowlabs.com >>$HOME/.ssh/known_hosts';
RUN 'sudo -E ssh -T git@git.starbeamrainbowlabs.com || ssh-keyscan -H git.starbeamrainbowlabs.com | sudo tee -a /root/.ssh/known_hosts';

RUN "[[ -d /etc/cluster-config ]] && { cd /etc/cluster-config; sudo -E git pull; } || sudo -E git clone git@git.starbeamrainbowlabs.com:sbrl/cluster-config.git /etc/cluster-config";
jobs/consul-client/consul-client.job  (Executable file, 25 lines)
@@ -0,0 +1,25 @@
#!/usr/bin/env bash

SCRIPT "jobs/consul-server/common.sh";

COPY "../consul/client.hcl" "/tmp/client.hcl"

RUN "sudo mv /tmp/client.hcl /etc/consul/client.hcl";
RUN "sudo chown root:root /etc/consul/client.hcl";
RUN "sudo apt-get update";
RUN "sudo apt-get install --yes hashicorp-consul-systemd-client";

# Do we need these as a client? I'm guessing not....?
# If we run into issues, we'll unblock them.
# RUN "sudo ufw allow 8301 comment consul-serf-lan";
# # RUN "sudo ufw allow 8300/tcp comment consul-rpc";
# RUN "sudo ufw allow 8600 comment consul-dns";

# Commented out since we're now running over wgoverlay & allowing all traffic on that interface because dynamic ports
# Delete old ufw rules
RUN "sudo ufw delete allow 8301 comment consul-serf-lan";
RUN "sudo ufw delete allow 8300/tcp comment consul-rpc";
RUN "sudo ufw delete allow 8600 comment consul-dns";

RUN "sudo systemctl enable consul.service";
RUN "sudo systemctl restart consul.service";
jobs/consul-server/common.sh  (Executable file, 25 lines)
@@ -0,0 +1,25 @@
#!/usr/bin/env bash

log_msg() {
    echo -e "[ ${HOSTNAME} ] [ $(date -u +"%Y-%m-%dT%H:%M:%SZ") ] $*" >&2;
}


log_msg "Installing consul";
sudo apt-get update;
sudo apt-get install --yes hashicorp-consul;

log_msg "consul installed successfully. Version:";
consul --version;

log_msg "Creating directories";
if [[ ! -d "/etc/consul" ]]; then
    sudo mkdir /etc/consul;
    sudo chown root:root /etc/consul;
fi

if [[ ! -d "/srv/consul" ]]; then
    sudo mkdir /srv/consul;
    sudo chown root:root /srv/consul;
    sudo chmod 0750 /srv/consul;
fi
jobs/consul-server/consul-server.job  (Executable file, 25 lines)
@@ -0,0 +1,25 @@
#!/usr/bin/env bash

SCRIPT "${JOBFILE_DIR}/common.sh";

COPY "../consul/server.hcl" "/tmp/server.hcl"

RUN "sudo mv /tmp/server.hcl /etc/consul/server.hcl";
RUN "sudo chown root:root /etc/consul/server.hcl";
RUN "sudo apt-get update";
RUN "sudo apt-get install --yes hashicorp-consul-systemd-server";

# TODO: Restrict these to the wesher subnet, and add 8500/tcp (the HTTP API)
# TODO: Do that for Nomad too
# RUN "sudo ufw allow 8301 comment consul-serf-lan";
# RUN "sudo ufw allow 8300/tcp comment consul-rpc";
# RUN "sudo ufw allow 8600 comment consul-dns";

# Commented out since we're now running over wgoverlay & allowing all traffic on that interface because dynamic ports
# Delete old ufw rules
RUN "sudo ufw delete allow 8301 comment consul-serf-lan";
RUN "sudo ufw delete allow 8300/tcp comment consul-rpc";
RUN "sudo ufw delete allow 8600 comment consul-dns";

RUN "sudo systemctl enable consul.service";
RUN "sudo systemctl restart consul.service";
jobs/dns/dns.job  (Executable file, 3 lines)
@@ -0,0 +1,3 @@
#!/usr/bin/env bash

SCRIPT "${JOBFILE_DIR}/dns.sh";
jobs/dns/dns.sh  (Executable file, 39 lines)
@@ -0,0 +1,39 @@
#!/usr/bin/env bash

# Usage:
# configLine [searchPattern] [replaceLine] [filePath]
# Ref https://stackoverflow.com/a/54909102/1460422
config_line() {
    local OLD_LINE_PATTERN=$1; shift
    local NEW_LINE=$1; shift
    local FILE=$1
    local NEW=$(echo "${NEW_LINE}" | sed 's/\//\\\//g')
    touch "${FILE}"
    sed -i '/'"${OLD_LINE_PATTERN}"'/{s/.*/'"${NEW}"'/;h};${x;/./{x;q100};x}' "${FILE}"
    if [[ $? -ne 100 ]] && [[ ${NEW_LINE} != '' ]]
    then
        echo "${NEW_LINE}" >> "${FILE}"
    fi
}
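# config_line rewrites any line matching searchPattern to replaceLine in-place; the sed program
# exits with status 100 when a replacement happened, and otherwise replaceLine is appended.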

log_msg() {
    echo -e "[ ${HOSTNAME} ] [ $(date -u +"%Y-%m-%dT%H:%M:%SZ") ] $*" >&2;
}

# Old approach - we've learned we can override /etc/network/interfaces via /etc/dhcp/dhclient.conf
# config_line "dns-nameservers" "dns-nameservers 172.16.230.100" "/etc/network/interfaces";

completed_file="/etc/dhcp/sbrl-dns-configured";

log_msg "Configuring DNS nameserver";
if [[ ! -f "${completed_file}" ]]; then
    log_msg "DNS nameserver not yet configured, appending to /etc/dhcp/dhclient.conf";
    echo "interface \"eth0\" {
    supersede domain-name-servers 172.16.230.100;
}" | sudo tee -a /etc/dhcp/dhclient.conf;
    sudo touch "${completed_file}";

    log_msg "Complete, this machine needs a reboot to activate the new config directives";
else
    log_msg "DNS nameserver config written already, no changes made";
fi
jobs/docker/docker.job  (Executable file, 14 lines)
@@ -0,0 +1,14 @@
#!/usr/bin/env bash

# Fix apt update errors
# Ref https://blog.samcater.com/fix-workaround-rpi4-docker-libseccomp2-docker-20/
RUN "sudo apt-get install libseccomp2 -t buster-backports";

# RUN "curl -fsSL https://get.docker.com -o /tmp/get-docker.sh";
# RUN "chmod +x /tmp/get-docker.sh";
# RUN "sudo sh /tmp/get-docker.sh";
RUN 'echo "deb [arch=$(dpkg --print-architecture)] http://download.docker.com/linux/raspbian buster stable" | sudo tee /etc/apt/sources.list.d/docker.list';
RUN "sudo apt-get update";
RUN "sudo apt-get install --yes docker-ce";
RUN "if [[ ! -e \"/etc/docker/daemon.json\" ]]; then echo '{\"insecure-registries\":[\"registry.service.mooncarrot.space:5000\"]}' | sudo tee /etc/docker/daemon.json; fi";
RUN "sudo systemctl reload docker.service";
jobs/install-utils-python/install-utils-python.job  (Executable file, 6 lines)
@@ -0,0 +1,6 @@
#!/usr/bin/env bash

RUN "sudo apt-get install python3-pip";

RUN "sudo pip3 install setuptools";
RUN "sudo pip3 install quarry";
jobs/install-utils/install-utils.job  (Executable file, 3 lines)
@@ -0,0 +1,3 @@
#!/usr/bin/env bash

RUN "sudo apt-get install --no-install-recommends --yes git net-tools dnsutils uptimed software-properties-common iftop iotop pv screen netcat f3 less optipng jpegoptim lsof strace ddrescue ffmpeg lnav jq";
jobs/laminar-sshkey/laminar-sshkey.job  (Executable file, 16 lines)
@@ -0,0 +1,16 @@
#!/usr/bin/env bash

sshkey="$(mktemp --tmpdir "laminarci-sshkey-pub-XXXXXXX")";
echo "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICzYSWsGbWHh+cDheHJSKlCrxYDLwS6SKwVMV9SAaGsO laminar-ci@elessar" >"${sshkey}";

upload_target="/tmp/laminarci-sshkey-pub";

COPY "${sshkey}" "${upload_target}";

RUN "sudo mkdir -p /root/.ssh";
RUN_ONCE "cat '${upload_target}' | sudo tee /root/.ssh/authorized_keys";
RUN "rm '${upload_target}'";

# Set permissions
RUN "sudo chown -R root:root /root/.ssh";
RUN "sudo chmod 0600 /root/.ssh/authorized_keys";
jobs/maintenance/maintenance.job  (Executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/usr/bin/env bash

# Remove any unused Docker images
RUN "sudo docker image prune --force";
jobs/nfs/cluster-shared-nfs  (Executable file, 5 lines)
@@ -0,0 +1,5 @@
#!/usr/bin/env bash
if ! mountpoint -q /mnt/shared; then mount /mnt/shared; fi
if ! mountpoint -q /mnt/elfstone; then mount /mnt/elfstone; fi
if ! mountpoint -q /mnt/elessar-music; then mount /mnt/elessar-music; fi
if ! mountpoint -q /mnt/elessar-syncthing; then mount /mnt/elessar-syncthing; fi
jobs/nfs/nfs-setup.sh  (Executable file, 46 lines)
@@ -0,0 +1,46 @@
#!/usr/bin/env bash

echo "[job/nfs] Checking NFS mounts" >&2;

# _netdev      The filesystem resides on a device that requires network access (used to prevent the system from attempting to mount these filesystems until the network has been enabled on the system)
# nofail       Do not report errors for this device if it does not exist.
# auto         Auto-mount on boot
# noatime      No last-access time
# tcp          Use tcp, not udp
# bg           The system won't block during boot until it is able to mount the filesystem, but won't give up on trying to mount it either.
# timeo=VALUE  The time in deciseconds (tenths of a second) the NFS client waits for a response before it retries an NFS request.
# retrans=n    The number of times the NFS client retries a request before it attempts further recovery action.
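# Put together, an fstab entry using these options follows the pattern below (server:/export
# and /mnt/point are placeholders; the real entries are appended further down):
#   server:/export /mnt/point nfs auto,nofail,noatime,_netdev,tcp,bg,timeo=50,retrans=5 0 0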


if [[ ! -d /mnt/shared ]]; then
    echo "[job/nfs] Creating mount for shared cluster data" >&2;
    sudo mkdir -p /mnt/shared;
    echo 'magicbag.node.mooncarrot.space:/mnt/elfstone2/cluster /mnt/shared nfs auto,nofail,noatime,_netdev,tcp,bg,timeo=50,retrans=5 0 0' | sudo tee -a /etc/fstab;
    sudo mount /mnt/shared;
fi


if [[ ! -d /mnt/elfstone ]]; then
    echo "[job/nfs] Creating mount for elfstone" >&2;
    sudo mkdir -p /mnt/elfstone;
    echo 'magicbag.node.mooncarrot.space:/mnt/elfstone2/main /mnt/elfstone nfs auto,nofail,noatime,_netdev,tcp,bg,timeo=50,retrans=5 0 0' | sudo tee -a /etc/fstab;
    sudo mount /mnt/elfstone;
fi


if [[ ! -d /mnt/elessar-music ]]; then
    echo "[job/nfs] Creating mount for elessar-music" >&2;
    sudo mkdir -p /mnt/elessar-music;
    echo 'magicbag.node.mooncarrot.space:/mnt/elfstone2/syncthing/Music /mnt/elessar-music nfs auto,nofail,_netdev,noatime,tcp,bg,timeo=50,retrans=5 0 0' | sudo tee -a /etc/fstab;
    sudo mount /mnt/elessar-music;
fi


if [[ ! -d /mnt/elessar-syncthing ]]; then
    echo "[job/nfs] Creating mount for elessar-syncthing" >&2;
    sudo mkdir -p /mnt/elessar-syncthing;
    echo 'magicbag.node.mooncarrot.space:/mnt/elfstone2/syncthing /mnt/elessar-syncthing nfs auto,nofail,_netdev,noatime,tcp,bg,timeo=50,retrans=5 0 0' | sudo tee -a /etc/fstab;
    sudo mount /mnt/elessar-syncthing;
fi

echo "[job/nfs] Complete" >&2;
jobs/nfs/nfs.job  (Executable file, 11 lines)
@@ -0,0 +1,11 @@
#!/usr/bin/env bash

RUN "sudo apt-get update";
RUN "sudo apt-get install --yes nfs-common";

SCRIPT "${JOBFILE_DIR}/nfs-setup.sh"

COPY "${JOBFILE_DIR}/cluster-shared-nfs" "/tmp/cluster-shared-nfs";
RUN "sudo mv /tmp/cluster-shared-nfs /etc/network/if-up.d/cluster-shared-nfs";
RUN "sudo chown root:root /etc/network/if-up.d/cluster-shared-nfs";
RUN "sudo chmod +x /etc/network/if-up.d/cluster-shared-nfs"
jobs/nomad-both/common.sh  (Executable file, 63 lines)
@@ -0,0 +1,63 @@
#!/usr/bin/env bash

log_msg() {
    echo -e "[ ${HOSTNAME} ] [ $(date -u +"%Y-%m-%dT%H:%M:%SZ") ] $*" >&2;
}


log_msg "Installing nomad";
sudo apt-get update;
sudo apt-get install --yes hashicorp-nomad;

log_msg "nomad installed successfully. Version:";
nomad --version;

log_msg "Creating directories";
if [[ ! -d "/etc/nomad" ]]; then
    sudo mkdir /etc/nomad;
    sudo chown root:root /etc/nomad;
fi

if [[ ! -d "/srv/nomad" ]]; then
    sudo mkdir /srv/nomad;
    sudo chown root:root /srv/nomad;
    sudo chmod 0750 /srv/nomad;
fi

check_cgroups_memory() {
    log_msg "Checking memory cgroups";

    cgroups_enabled="$(awk '/memory/ { print $2 }' < /proc/cgroups)";
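    # Column 2 of /proc/cgroups (format: subsys_name, hierarchy, num_cgroups, enabled) is the
    # hierarchy ID; the check below treats a non-zero value as "memory controller already enabled".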

    if [[ "${cgroups_enabled}" -ne 0 ]]; then
        log_msg "memory cgroups already enabled";
        return 0;
    fi


    filepath_cmdline="/boot/cmdline.txt";
    if [[ ! -e "${filepath_cmdline}" ]]; then
        filepath_cmdline="/boot/firmware/cmdline.txt";
    fi
    if [[ ! -e "${filepath_cmdline}" ]]; then
        log_msg "Failed to find cmdline.txt; can't check for cgroups";
        return 1;
    fi

    if grep -q cgroup_enable=memory "${filepath_cmdline}"; then
        log_msg "memory cgroups already present in cmdline.txt, a reboot is required to apply the update";
        return 0;
    fi

    log_msg "memory cgroups not present in cmdline.txt, enabling....";
    (tr -d '\n' <"${filepath_cmdline}" && echo " cgroup_enable=memory cgroup_memory=1") | sudo tee "${filepath_cmdline}.new";

    sudo mv "${filepath_cmdline}" "${filepath_cmdline}.old-$(date +"%Y-%m-%d")";
    sudo mv "${filepath_cmdline}.new" "${filepath_cmdline}";

    log_msg "New contents of cmdline.txt:";
    cat "${filepath_cmdline}";
    log_msg "A reboot is required to apply the changes.";
}

check_cgroups_memory;
jobs/nomad-both/nomad-both.job  (Executable file, 22 lines)
@@ -0,0 +1,22 @@
#!/usr/bin/env bash

SCRIPT "${JOBFILE_DIR}/common.sh";

COPY "../nomad/both.hcl" "/tmp/both.hcl"

RUN "sudo mv /tmp/both.hcl /etc/nomad/both.hcl";
RUN "sudo chown root:root /etc/nomad/both.hcl";

RUN "sudo apt-get install --yes hashicorp-nomad-systemd-both";


# Commented out since we're now running over wgoverlay & allowing all traffic on that interface because dynamic ports
# RUN "sudo ufw allow 4646/tcp comment nomad-http";
# RUN "sudo ufw allow 4647/tcp comment nomad-rpc";
# RUN "sudo ufw allow 4648/tcp comment nomad-serf";
RUN "sudo ufw delete allow 4646/tcp comment nomad-http";
RUN "sudo ufw delete allow 4647/tcp comment nomad-rpc";
RUN "sudo ufw delete allow 4648/tcp comment nomad-serf";

RUN "sudo systemctl enable nomad.service";
RUN "sudo systemctl restart nomad.service";
jobs/nomad-client/nomad-client.job  (Executable file, 21 lines)
@@ -0,0 +1,21 @@
#!/usr/bin/env bash

SCRIPT "jobs/nomad-both/common.sh";

COPY "../nomad/client.hcl" "/tmp/client.hcl"

RUN "sudo mv /tmp/client.hcl /etc/nomad/client.hcl";
RUN "sudo chown root:root /etc/nomad/client.hcl";

RUN "sudo apt-get install --yes hashicorp-nomad-systemd-client";

# Commented out since we're now running over wgoverlay & allowing all traffic on that interface because dynamic ports
# RUN "sudo ufw allow 4646/tcp comment nomad-http";
# RUN "sudo ufw allow 4647/tcp comment nomad-rpc";
# RUN "sudo ufw allow 4648/tcp comment nomad-serf";
RUN "sudo ufw delete allow 4646/tcp comment nomad-http";
RUN "sudo ufw delete allow 4647/tcp comment nomad-rpc";
RUN "sudo ufw delete allow 4648/tcp comment nomad-serf";

RUN "sudo systemctl enable nomad.service";
RUN "sudo systemctl restart nomad.service";
jobs/preconfigure/misc  (Normal file, 0 lines)
jobs/preconfigure/preconfigure.job  (Executable file, 11 lines)
@@ -0,0 +1,11 @@
#!/usr/bin/env bash

RUN "sudo apt-get install -y logrotate"

COPY "${JOBFILE_DIR}/misc" "/tmp/misc"
RUN "sudo mv /tmp/misc /etc/logrotate.d/misc"
RUN "sudo chown root:root /etc/logrotate.d/misc"

# Fix logrotate issue
RUN "sudo chmod 0750 /var/log"
RUN "sudo systemctl restart logrotate"
jobs/sbrl-apt-repo/sbrl-apt-repo.job  (Executable file, 7 lines)
@@ -0,0 +1,7 @@
#!/usr/bin/env bash

RUN "echo 'deb http://apt.starbeamrainbowlabs.com/ ./ # apt.starbeamrainbowlabs.com' | sudo tee /etc/apt/sources.list.d/sbrl.list";

RUN "sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys D48D801C6A66A5D8";

RUN "sudo apt-get update";
jobs/ssh-preconnect/ssh-preconnect.job  (Executable file, 4 lines)
@@ -0,0 +1,4 @@
#!/usr/bin/env bash

RUN 'ssh -T git@git.starbeamrainbowlabs.com || ssh-keyscan -H git.starbeamrainbowlabs.com >>$HOME/.ssh/known_hosts';
RUN 'sudo -E ssh -T git@git.starbeamrainbowlabs.com || ssh-keyscan -H git.starbeamrainbowlabs.com | sudo tee -a /root/.ssh/known_hosts';
jobs/test/test.job  (Executable file, 7 lines)
@@ -0,0 +1,7 @@
#!/usr/bin/env bash

RUN "uptime";

COPY "${JOBFILE_DIR}/testfile.txt" "/tmp";

RUN "rm /tmp/testfile.txt";
jobs/test/testfile.txt  (Normal file, 1 line)
@@ -0,0 +1 @@
it works!
jobs/ufw/ufw.job  (Executable file, 9 lines)
@@ -0,0 +1,9 @@
#!/usr/bin/env bash

RUN "sudo apt-get update";

RUN "sudo apt-get install --yes ufw";

RUN "sudo ufw allow 22/tcp comment ssh";

RUN "echo 'y' | sudo ufw enable";
jobs/update-system/update-system.job  (Executable file, 3 lines)
@@ -0,0 +1,3 @@
#!/usr/bin/env bash

SCRIPT "${JOBFILE_DIR}/update-system.sh";
jobs/update-system/update-system.sh  (Executable file, 17 lines)
@@ -0,0 +1,17 @@
#!/usr/bin/env bash

log_msg() {
    echo -e "[ ${HOSTNAME} ] [ $(date -u +"%Y-%m-%dT%H:%M:%SZ") ] $*" >&2;
}

log_msg "Starting update";
sudo apt-get update;

log_msg "Starting upgrade";
sudo apt-get dist-upgrade --yes;

log_msg "Starting autoremove";
sudo apt-get autoremove --yes;

log_msg "Starting autoclean";
sudo apt-get autoclean;
jobs/vault/vault.job  (Executable file, 11 lines)
@@ -0,0 +1,11 @@
#!/usr/bin/env bash

RUN "sudo apt-get install -y hashicorp-vault hashicorp-vault-systemd";

COPY "../vault/server.hcl" "/tmp/vault-server.hcl";
RUN "sudo chown root:root /tmp/vault-server.hcl";
RUN "sudo mkdir -p /etc/vault";
RUN "sudo mv /tmp/vault-server.hcl /etc/vault/server.hcl";

RUN "sudo systemctl enable vault.service";
RUN "sudo systemctl start vault.service";
jobs/wesher/wesher.job  (Executable file, 27 lines)
@@ -0,0 +1,27 @@
#!/usr/bin/env bash

echo "Don't forget to run the wireguard job first";

# Sort out log rotation
COPY "${JOBFILE_DIR}/wesher.logrotate" "/tmp/wesher.logrotate"
RUN "sudo mv /tmp/wesher.logrotate /etc/logrotate.d/wesher"
RUN "sudo chown root:root /etc/logrotate.d/wesher"

RUN "sudo apt-get update";

RUN "sudo apt-get install --yes wesher wesher-systemd";

RUN "sudo ufw allow 7946 comment wesher-gossip";
RUN "sudo ufw allow 51820/udp comment wesher-wireguard";

# Allow all traffic from the VPN; we don't actually know what ports Nomad will use
RUN "sudo ufw allow in on wgoverlay";

# Activate the new logging system
RUN "sudo systemctl restart logrotate"

# See the Keepass entry in our database for copy-paste commands to join nodes.
# Basically:
#   sudo wesher --cluster-key CLUSTER_KEY_HERE --join 172.16.230.100 --overlay-net 172.31.250.0/16 --log-level info
#   sudo systemctl enable --now wesher.service
#   sudo systemctl status wesher.service
jobs/wesher/wesher.logrotate  (Normal file, 12 lines)
@@ -0,0 +1,12 @@
/var/log/wesher/*.log {
    rotate 12
    weekly
    missingok
    notifempty
    compress
    delaycompress
    sharedscripts
    postrotate
        invoke-rc.d rsyslog rotate >/dev/null 2>&1 || true
    endscript
}
jobs/wireguard/wireguard.job  (Executable file, 11 lines)
@@ -0,0 +1,11 @@
#!/usr/bin/env bash

RUN "sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 04EE7237B7D453EC 648ACFD622F3D138";

RUN "echo 'deb http://httpredir.debian.org/debian buster-backports main contrib non-free' | sudo tee /etc/apt/sources.list.d/debian-backports.list";

RUN "sudo apt-get update";

RUN "sudo apt-get install --yes raspberrypi-kernel-headers";

RUN "sudo apt-get install --yes wireguard";
list-hostgroups  (Executable file, 5 lines)
@@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Make sure the current directory is the location of this script to simplify matters
cd "$(dirname "$(readlink -f "$0")")";

find hosts -type f -iname '*.txt' | xargs -n1 basename --suffix=.txt | sort
list-jobs  (Executable file, 5 lines)
@@ -0,0 +1,5 @@
#!/usr/bin/env bash
# Make sure the current directory is the location of this script to simplify matters
cd "$(dirname "$(readlink -f "$0")")";

ls ./jobs/
new-job  (Executable file, 18 lines)
@@ -0,0 +1,18 @@
#!/usr/bin/env bash
# Make sure the current directory is the location of this script to simplify matters
cd "$(dirname "$(readlink -f "$0")")";

job_name="$1";

if [[ -z "${job_name}" ]]; then
    echo "Usage:";
    echo "    ./new-job {job_name}";
    exit;
fi

job_filename="./jobs/${job_name}/${job_name}.job";

mkdir -p "$(dirname "${job_filename}")";

echo -e "#!/usr/bin/env bash\n" >"${job_filename}";
chmod +x "${job_filename}";
ssh-config  (Normal file, 46 lines)
@@ -0,0 +1,46 @@
ServerAliveInterval 120

Host *
    ControlMaster auto
    ControlPath /tmp/%r@%h:%p
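    # ControlMaster/ControlPath multiplex all connections to a given host over one SSH session,
    # so the many RUN/COPY calls a job makes don't each pay for a fresh handshake.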


Host cluster-wopplefox
    Hostname 172.16.230.100
    Port 22

    User root
    IdentityFile ~/.ssh/id_ed25519
    ForwardAgent yes
Host cluster-sycadil
    Hostname 172.16.230.102
    Port 22

    User dietpi
    IdentityFile ~/.ssh/id_ed25519
    ForwardAgent yes
Host cluster-spatterling
    Hostname 172.16.230.101
    Port 22

    User dietpi
    IdentityFile ~/.ssh/id_ed25519
    ForwardAgent yes
Host cluster-balusha
    Hostname 172.16.230.103
    Port 22

    User dietpi
    IdentityFile ~/.ssh/id_ed25519
    ForwardAgent yes
Host cluster-silverleaf
    # braskin
    # toko
    # *silverleaf*
    # elligon: router
    Hostname 172.16.230.104
    Port 22

    User dietpi
    IdentityFile ~/.ssh/id_ed25519
    ForwardAgent yes