Joey Hafner 2024-10-21 15:27:09 -07:00
parent 5f094a2164
commit de9b8a5245
33 changed files with 2492 additions and 0 deletions


@@ -0,0 +1,9 @@
[Unit]
Description=GoXLR Utility Daemon
After=network.target
[Service]
ExecStart=/usr/bin/goxlr-daemon
[Install]
WantedBy=default.target


@@ -0,0 +1,86 @@
[
{
"name": "Left Display",
"padding": 10,
"zones": [
{
"x": 50,
"y": 0,
"height": 100,
"width": 50
},
{
"x": 0,
"y": 0,
"height": 50,
"width": 50
},
{
"x": 0,
"y": 50,
"height": 50,
"width": 50
}
]
},
{
"name": "Middle Display",
"padding": 10,
"zones": [
{
"x": 0,
"y": 0,
"height": 100,
"width": 50
},
{
"x": 50,
"y": 0,
"height": 100,
"width": 50
}
]
},
{
"name": "Right Display",
"padding": 10,
"zones": [
{
"x": 0,
"y": 55,
"height": 45,
"width": 50
},
{
"x": 0,
"y": 0,
"height": 55,
"width": 50
},
{
"x": 50,
"y": 0,
"height": 45,
"width": 50
},
{
"x": 50,
"y": 45,
"height": 55,
"width": 50
}
]
},
{
"name": "Fullscreen",
"padding": 10,
"zones": [
{
"x": 0,
"y": 0,
"height": 100,
"width": 100
}
]
}
]


@@ -0,0 +1,634 @@
version: 3
profiles:
- type: ssh
name: jafner.tools
icon: fas fa-desktop
options:
host: 143.110.151.123
algorithms:
hmac:
- hmac-sha1
- hmac-sha1-etm@openssh.com
- hmac-sha2-256
- hmac-sha2-256-etm@openssh.com
- hmac-sha2-512
- hmac-sha2-512-etm@openssh.com
kex:
- curve25519-sha256
- curve25519-sha256@libssh.org
- diffie-hellman-group-exchange-sha256
- diffie-hellman-group14-sha256
- diffie-hellman-group15-sha512
- diffie-hellman-group16-sha512
- diffie-hellman-group17-sha512
- diffie-hellman-group18-sha512
- ecdh-sha2-nistp256
- ecdh-sha2-nistp384
- ecdh-sha2-nistp521
cipher:
- aes128-ctr
- aes128-gcm
- aes128-gcm@openssh.com
- aes192-ctr
- aes256-ctr
- aes256-gcm
- aes256-gcm@openssh.com
serverHostKey:
- ecdsa-sha2-nistp256
- ecdsa-sha2-nistp384
- ecdsa-sha2-nistp521
- rsa-sha2-256
- rsa-sha2-512
- ssh-ed25519
- ssh-rsa
privateKeys:
- file://C:\Users\jafne\.ssh\id_rsa
- file://C:\Users\jafne\.ssh\main_id_rsa
forwardedPorts: []
scripts: []
weight: -1
disableDynamicTitle: false
id: ssh:custom:jafner.tools:0191700a-8cce-44d4-b87e-811f02425043
- type: ssh
icon: fas fa-desktop
options:
host: 143.110.225.17
algorithms:
hmac:
- hmac-sha1
- hmac-sha1-etm@openssh.com
- hmac-sha2-256
- hmac-sha2-256-etm@openssh.com
- hmac-sha2-512
- hmac-sha2-512-etm@openssh.com
kex:
- curve25519-sha256
- curve25519-sha256@libssh.org
- diffie-hellman-group-exchange-sha256
- diffie-hellman-group14-sha256
- diffie-hellman-group15-sha512
- diffie-hellman-group16-sha512
- diffie-hellman-group17-sha512
- diffie-hellman-group18-sha512
- ecdh-sha2-nistp256
- ecdh-sha2-nistp384
- ecdh-sha2-nistp521
cipher:
- aes128-ctr
- aes128-gcm
- aes128-gcm@openssh.com
- aes192-ctr
- aes256-ctr
- aes256-gcm
- aes256-gcm@openssh.com
serverHostKey:
- ecdsa-sha2-nistp256
- ecdsa-sha2-nistp384
- ecdsa-sha2-nistp521
- rsa-sha2-256
- rsa-sha2-512
- ssh-ed25519
- ssh-rsa
privateKeys:
- file://C:\Users\jafne\.ssh\id_rsa
- file://C:\Users\jafne\.ssh\main_id_rsa
weight: -1
disableDynamicTitle: false
name: birch (digitalocean)
id: ssh:custom:birch-(digitalocean):7a35227a-b09c-49ea-afa3-b56bcfeaa21d
- type: ssh
icon: fas fa-desktop
options:
host: 190.102.110.135
algorithms:
hmac:
- hmac-sha1
- hmac-sha1-etm@openssh.com
- hmac-sha2-256
- hmac-sha2-256-etm@openssh.com
- hmac-sha2-512
- hmac-sha2-512-etm@openssh.com
kex:
- curve25519-sha256
- curve25519-sha256@libssh.org
- diffie-hellman-group-exchange-sha256
- diffie-hellman-group14-sha256
- diffie-hellman-group15-sha512
- diffie-hellman-group16-sha512
- diffie-hellman-group17-sha512
- diffie-hellman-group18-sha512
- ecdh-sha2-nistp256
- ecdh-sha2-nistp384
- ecdh-sha2-nistp521
cipher:
- aes128-ctr
- aes128-gcm
- aes128-gcm@openssh.com
- aes192-ctr
- aes256-ctr
- aes256-gcm
- aes256-gcm@openssh.com
serverHostKey:
- ecdsa-sha2-nistp256
- ecdsa-sha2-nistp384
- ecdsa-sha2-nistp521
- rsa-sha2-256
- rsa-sha2-512
- ssh-ed25519
- ssh-rsa
privateKeys:
- file://C:\Users\jafne\.ssh\id_rsa
- file://C:\Users\jafne\.ssh\main_id_rsa
user: admin
forwardedPorts: []
scripts: []
weight: -1
disableDynamicTitle: false
name: archer.industries (ssdnodes)
id: >-
ssh:custom:archer.industries-(ssdnodes):ffdc70ca-786f-4830-b8eb-944ea1a2b4b9
- type: ssh
name: NAS2
icon: fas fa-desktop
options:
host: 192.168.1.11
algorithms:
hmac:
- hmac-sha1
- hmac-sha1-etm@openssh.com
- hmac-sha2-256
- hmac-sha2-256-etm@openssh.com
- hmac-sha2-512
- hmac-sha2-512-etm@openssh.com
kex:
- curve25519-sha256
- curve25519-sha256@libssh.org
- diffie-hellman-group-exchange-sha256
- diffie-hellman-group14-sha256
- diffie-hellman-group15-sha512
- diffie-hellman-group16-sha512
- diffie-hellman-group17-sha512
- diffie-hellman-group18-sha512
- ecdh-sha2-nistp256
- ecdh-sha2-nistp384
- ecdh-sha2-nistp521
cipher:
- aes128-ctr
- aes128-gcm
- aes128-gcm@openssh.com
- aes192-ctr
- aes256-ctr
- aes256-gcm
- aes256-gcm@openssh.com
serverHostKey:
- ecdsa-sha2-nistp256
- ecdsa-sha2-nistp384
- ecdsa-sha2-nistp521
- rsa-sha2-256
- rsa-sha2-512
- ssh-ed25519
- ssh-rsa
privateKeys:
- file://C:\Users\jafne\.ssh\id_rsa
user: admin
forwardedPorts: []
scripts: []
weight: -1
disableDynamicTitle: false
id: ssh:custom:NAS2:7fa3249a-ae20-477d-a9f5-809d31b4012b
- type: ssh
name: Wysehole
icon: fas fa-desktop
options:
host: 192.168.1.32
algorithms:
hmac:
- hmac-sha1
- hmac-sha1-etm@openssh.com
- hmac-sha2-256
- hmac-sha2-256-etm@openssh.com
- hmac-sha2-512
- hmac-sha2-512-etm@openssh.com
kex:
- curve25519-sha256
- curve25519-sha256@libssh.org
- diffie-hellman-group-exchange-sha256
- diffie-hellman-group14-sha256
- diffie-hellman-group15-sha512
- diffie-hellman-group16-sha512
- diffie-hellman-group17-sha512
- diffie-hellman-group18-sha512
- ecdh-sha2-nistp256
- ecdh-sha2-nistp384
- ecdh-sha2-nistp521
cipher:
- aes128-ctr
- aes128-gcm
- aes128-gcm@openssh.com
- aes192-ctr
- aes256-ctr
- aes256-gcm
- aes256-gcm@openssh.com
serverHostKey:
- ecdsa-sha2-nistp256
- ecdsa-sha2-nistp384
- ecdsa-sha2-nistp521
- rsa-sha2-256
- rsa-sha2-512
- ssh-ed25519
- ssh-rsa
privateKeys:
- file://C:\Users\jafne\.ssh\id_rsa
user: joey
forwardedPorts: []
scripts: []
weight: -1
disableDynamicTitle: false
id: ssh:custom:Wysehole:fbc4c495-4413-4a75-a41c-2b3c0c21b51d
- type: ssh
name: Router
icon: fas fa-desktop
options:
host: 192.168.1.1
algorithms:
hmac:
- hmac-sha1
- hmac-sha1-etm@openssh.com
- hmac-sha2-256
- hmac-sha2-256-etm@openssh.com
- hmac-sha2-512
- hmac-sha2-512-etm@openssh.com
kex:
- curve25519-sha256
- curve25519-sha256@libssh.org
- diffie-hellman-group-exchange-sha256
- diffie-hellman-group14-sha256
- diffie-hellman-group15-sha512
- diffie-hellman-group16-sha512
- diffie-hellman-group17-sha512
- diffie-hellman-group18-sha512
- ecdh-sha2-nistp256
- ecdh-sha2-nistp384
- ecdh-sha2-nistp521
cipher:
- aes128-ctr
- aes128-gcm
- aes128-gcm@openssh.com
- aes192-ctr
- aes256-ctr
- aes256-gcm
- aes256-gcm@openssh.com
serverHostKey:
- ecdsa-sha2-nistp256
- ecdsa-sha2-nistp384
- ecdsa-sha2-nistp521
- rsa-sha2-256
- rsa-sha2-512
- ssh-ed25519
- ssh-rsa
privateKeys:
- file://C:\Users\jafne\.ssh\id_rsa
user: vyos
forwardedPorts: []
scripts: []
weight: -1
disableDynamicTitle: false
id: ssh:custom:Router:06d38873-c528-48ed-9287-91d89cfeda3b
- type: ssh
name: NAS
icon: fas fa-desktop
options:
host: 192.168.1.10
algorithms:
hmac:
- hmac-sha1
- hmac-sha1-etm@openssh.com
- hmac-sha2-256
- hmac-sha2-256-etm@openssh.com
- hmac-sha2-512
- hmac-sha2-512-etm@openssh.com
kex:
- curve25519-sha256
- curve25519-sha256@libssh.org
- diffie-hellman-group-exchange-sha256
- diffie-hellman-group14-sha256
- diffie-hellman-group15-sha512
- diffie-hellman-group16-sha512
- diffie-hellman-group17-sha512
- diffie-hellman-group18-sha512
- ecdh-sha2-nistp256
- ecdh-sha2-nistp384
- ecdh-sha2-nistp521
cipher:
- aes128-ctr
- aes128-gcm
- aes128-gcm@openssh.com
- aes192-ctr
- aes256-ctr
- aes256-gcm
- aes256-gcm@openssh.com
serverHostKey:
- ecdsa-sha2-nistp256
- ecdsa-sha2-nistp384
- ecdsa-sha2-nistp521
- rsa-sha2-256
- rsa-sha2-512
- ssh-ed25519
- ssh-rsa
privateKeys:
- file://C:\Users\jafne\.ssh\id_rsa
forwardedPorts: []
scripts: []
weight: -1
disableDynamicTitle: false
id: ssh:custom:NAS:d565a9ec-ada2-49a1-b622-21410670fd2c
- type: ssh
name: Joey-server
icon: fas fa-desktop
options:
host: 192.168.1.23
user: joey
algorithms:
hmac:
- hmac-sha1
- hmac-sha1-etm@openssh.com
- hmac-sha2-256
- hmac-sha2-256-etm@openssh.com
- hmac-sha2-512
- hmac-sha2-512-etm@openssh.com
kex:
- curve25519-sha256
- curve25519-sha256@libssh.org
- diffie-hellman-group-exchange-sha256
- diffie-hellman-group14-sha256
- diffie-hellman-group15-sha512
- diffie-hellman-group16-sha512
- diffie-hellman-group17-sha512
- diffie-hellman-group18-sha512
- ecdh-sha2-nistp256
- ecdh-sha2-nistp384
- ecdh-sha2-nistp521
cipher:
- aes128-ctr
- aes128-gcm
- aes128-gcm@openssh.com
- aes192-ctr
- aes256-ctr
- aes256-gcm
- aes256-gcm@openssh.com
serverHostKey:
- ecdsa-sha2-nistp256
- ecdsa-sha2-nistp384
- ecdsa-sha2-nistp521
- rsa-sha2-256
- rsa-sha2-512
- ssh-ed25519
- ssh-rsa
privateKeys:
- file://C:\Users\jafne\.ssh\id_rsa
forwardedPorts: []
scripts: []
weight: -1
disableDynamicTitle: false
id: ssh:custom:Joey-server:6a060fa1-3321-4d2b-a82f-0c964b39fbba
hotkeys:
toggle-window:
- Ctrl-Space
copy-current-path: []
ctrl-c:
- Ctrl-C
copy:
- Ctrl-Shift-C
paste:
- Ctrl-Shift-V
- Shift-Insert
select-all:
- Ctrl-Shift-A
clear: []
zoom-in:
- Ctrl-=
- Ctrl-Shift-=
zoom-out:
- Ctrl--
- Ctrl-Shift--
reset-zoom:
- Ctrl-0
home:
- Home
end:
- End
previous-word:
- Ctrl-Left
next-word:
- Ctrl-Right
delete-previous-word:
- Ctrl-Backspace
delete-line:
- Ctrl-Shift-Backspace
delete-next-word:
- Ctrl-Delete
search:
- Ctrl-Shift-F
pane-focus-all:
- Ctrl-Shift-I
focus-all-tabs:
- Ctrl-Alt-Shift-I
scroll-to-top:
- Ctrl-PageUp
scroll-up:
- Alt-PageUp
scroll-down:
- Alt-PageDown
scroll-to-bottom:
- Ctrl-PageDown
restart-telnet-session: []
restart-ssh-session: []
launch-winscp: []
settings-tab: {}
settings:
- Ctrl-,
serial:
- Alt-K
restart-serial-session: []
new-tab:
- Ctrl-Shift-T
new-window:
- Ctrl-Shift-N
profile: {}
profile-selectors: {}
toggle-fullscreen:
- F11
- Alt-Enter
close-tab:
- Ctrl-Shift-W
reopen-tab:
- Ctrl-Shift-Z
toggle-last-tab: []
rename-tab:
- Ctrl-Shift-R
next-tab:
- Ctrl-Shift-Right
- Ctrl-Tab
previous-tab:
- Ctrl-Shift-Left
- Ctrl-Shift-Tab
move-tab-left:
- Ctrl-Shift-PageUp
move-tab-right:
- Ctrl-Shift-PageDown
rearrange-panes:
- Ctrl-Shift
duplicate-tab: []
restart-tab: []
reconnect-tab: []
explode-tab:
- Ctrl-Shift-.
combine-tabs:
- Ctrl-Shift-,
tab-1:
- Alt-1
tab-2:
- Alt-2
tab-3:
- Alt-3
tab-4:
- Alt-4
tab-5:
- Alt-5
tab-6:
- Alt-6
tab-7:
- Alt-7
tab-8:
- Alt-8
tab-9:
- Alt-9
tab-10:
- Alt-0
tab-11: []
tab-12: []
tab-13: []
tab-14: []
tab-15: []
tab-16: []
tab-17: []
tab-18: []
tab-19: []
tab-20: []
split-right:
- Ctrl-Shift-S
split-bottom:
- Ctrl-Shift-D
split-left: []
split-top: []
pane-nav-right:
- Ctrl-Alt-Right
pane-nav-down:
- Ctrl-Alt-Down
pane-nav-up:
- Ctrl-Alt-Up
pane-nav-left:
- Ctrl-Alt-Left
pane-nav-previous:
- Ctrl-Alt-[
pane-nav-next:
- Ctrl-Alt-]
pane-nav-1: []
pane-nav-2: []
pane-nav-3: []
pane-nav-4: []
pane-nav-5: []
pane-nav-6: []
pane-nav-7: []
pane-nav-8: []
pane-nav-9: []
pane-maximize:
- Ctrl-Alt-Enter
close-pane: []
pane-increase-vertical: []
pane-decrease-vertical: []
pane-increase-horizontal: []
pane-decrease-horizontal: []
switch-profile:
- Ctrl-Alt-T
profile-selector:
- Ctrl-Shift-E
command-selector:
- Ctrl-Shift-P
terminal:
searchOptions: {}
colorScheme:
selection: null
cursorAccent: null
name: Tabby Default
foreground: '#cacaca'
background: '#171717'
cursor: '#bbbbbb'
colors:
- '#000000'
- '#ff615a'
- '#b1e969'
- '#ebd99c'
- '#5da9f6'
- '#e86aff'
- '#82fff7'
- '#dedacf'
- '#313131'
- '#f58c80'
- '#ddf88f'
- '#eee5b2'
- '#a5c7ff'
- '#ddaaff'
- '#b7fff9'
- '#ffffff'
rightClick: menu
ssh:
knownHosts:
- host: 192.168.1.23
port: 22
type: ecdsa-sha2-nistp256
digest: 9TQHBmerRpUkEf0/o4CkroAOXQs8qw0ICZYcW86NHt8=
- host: 192.168.1.10
port: 22
type: ecdsa-sha2-nistp256
digest: Q32sarmRaJKXZnRyyd0BcJ33CZO1cXExVmQ+VzSZC9Q=
- host: 192.168.1.1
port: 22
type: ecdsa-sha2-nistp256
digest: kRE8tUYuVivcqfXiAiIjK/tgqW5Y33LmBJditdTZi20=
- host: 192.168.1.32
port: 22
type: ecdsa-sha2-nistp256
digest: m9oTSzFYeIrAFWsT9FVIlrDVk/VFVn4qb5miAIcYdm0=
- host: 192.168.1.11
port: 22
type: ecdsa-sha2-nistp256
digest: d/Omp+Ze8X0U/DYX2ITl/+mp7KxEafSQfMaAKhi45lk=
- host: 190.102.110.135
port: 22
type: ecdsa-sha2-nistp256
digest: AqDQnJi1+52AjpwFS3qb0MMd+iY4S0FfGtMDN1ThHA8=
- host: 143.110.225.17
port: 22
type: ecdsa-sha2-nistp256
digest: 57vMh6x2Zghf9UOLoAK/YyrNnTs3D+mbibH7x4iC+kM=
- host: 143.110.151.123
port: 22
type: ecdsa-sha2-nistp256
digest: X+RthZbuSikzgiGSrlEWO4rLN9PcaHC0f0sbwp52448=
configSync:
parts: {}
clickableLinks: {}
accessibility: {}
appearance: {}
hacks: {}
providerBlacklist: []
commandBlacklist: []
enableWelcomeTab: false
pluginBlacklist:
- telnet
- serial
profileDefaults:
ssh:
disableDynamicTitle: true
recoverTabs: false


@@ -0,0 +1,334 @@
# ---------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# ---------------------------------------------------------------------------------------------
# Prevent the script recursing when setting up
if [[ -n "${VSCODE_SHELL_INTEGRATION:-}" ]]; then
builtin return
fi
VSCODE_SHELL_INTEGRATION=1
# Run relevant rc/profile only if shell integration has been injected, not when run manually
if [ "$VSCODE_INJECTION" == "1" ]; then
if [ -z "$VSCODE_SHELL_LOGIN" ]; then
if [ -r ~/.bashrc ]; then
. ~/.bashrc
fi
else
# Imitate -l because --init-file doesn't support it:
# run the first of these files that exists
if [ -r /etc/profile ]; then
. /etc/profile
fi
# execute the first that exists
if [ -r ~/.bash_profile ]; then
. ~/.bash_profile
elif [ -r ~/.bash_login ]; then
. ~/.bash_login
elif [ -r ~/.profile ]; then
. ~/.profile
fi
builtin unset VSCODE_SHELL_LOGIN
# Apply any explicit path prefix (see #99878)
if [ -n "${VSCODE_PATH_PREFIX:-}" ]; then
export PATH=$VSCODE_PATH_PREFIX$PATH
builtin unset VSCODE_PATH_PREFIX
fi
fi
builtin unset VSCODE_INJECTION
fi
if [ -z "$VSCODE_SHELL_INTEGRATION" ]; then
builtin return
fi
# Apply EnvironmentVariableCollections if needed
if [ -n "${VSCODE_ENV_REPLACE:-}" ]; then
IFS=':' read -ra ADDR <<< "$VSCODE_ENV_REPLACE"
for ITEM in "${ADDR[@]}"; do
VARNAME="$(echo $ITEM | cut -d "=" -f 1)"
VALUE="$(echo -e "$ITEM" | cut -d "=" -f 2-)"
export $VARNAME="$VALUE"
done
builtin unset VSCODE_ENV_REPLACE
fi
if [ -n "${VSCODE_ENV_PREPEND:-}" ]; then
IFS=':' read -ra ADDR <<< "$VSCODE_ENV_PREPEND"
for ITEM in "${ADDR[@]}"; do
VARNAME="$(echo $ITEM | cut -d "=" -f 1)"
VALUE="$(echo -e "$ITEM" | cut -d "=" -f 2-)"
export $VARNAME="$VALUE${!VARNAME}"
done
builtin unset VSCODE_ENV_PREPEND
fi
if [ -n "${VSCODE_ENV_APPEND:-}" ]; then
IFS=':' read -ra ADDR <<< "$VSCODE_ENV_APPEND"
for ITEM in "${ADDR[@]}"; do
VARNAME="$(echo $ITEM | cut -d "=" -f 1)"
VALUE="$(echo -e "$ITEM" | cut -d "=" -f 2-)"
export $VARNAME="${!VARNAME}$VALUE"
done
builtin unset VSCODE_ENV_APPEND
fi
__vsc_get_trap() {
# 'trap -p DEBUG' outputs a shell command like `trap -- '…shellcode…' DEBUG`.
# The terms are quoted literals, but are not guaranteed to be on a single line.
# (Consider a trap like $'echo foo\necho \'bar\'').
# To parse, we splice those terms into an expression capturing them into an array.
# This preserves the quoting of those terms: when we `eval` that expression, they are preserved exactly.
# This is different than simply exploding the string, which would split everything on IFS, oblivious to quoting.
builtin local -a terms
builtin eval "terms=( $(trap -p "${1:-DEBUG}") )"
# |________________________|
# |
# \-------------------*--------------------/
# terms=( trap -- '…arbitrary shellcode…' DEBUG )
# |____||__| |_____________________| |_____|
# | | | |
# 0 1 2 3
# |
# \--------*----/
builtin printf '%s' "${terms[2]:-}"
}
__vsc_escape_value_fast() {
builtin local LC_ALL=C out
out=${1//\\/\\\\}
out=${out//;/\\x3b}
builtin printf '%s\n' "${out}"
}
# The property (P) and command (E) codes embed values which require escaping.
# Backslashes are doubled. Non-alphanumeric characters are converted to escaped hex.
__vsc_escape_value() {
# If the input is too large, switch to the faster function
if [ "${#1}" -ge 2000 ]; then
__vsc_escape_value_fast "$1"
builtin return
fi
# Process text byte by byte, not by codepoint.
builtin local LC_ALL=C str="${1}" i byte token out=''
for (( i=0; i < "${#str}"; ++i )); do
byte="${str:$i:1}"
# Escape backslashes, semi-colons specially, then special ASCII chars below space (0x20)
if [ "$byte" = "\\" ]; then
token="\\\\"
elif [ "$byte" = ";" ]; then
token="\\x3b"
elif (( $(builtin printf '%d' "'$byte") < 31 )); then
token=$(builtin printf '\\x%02x' "'$byte")
else
token="$byte"
fi
out+="$token"
done
builtin printf '%s\n' "${out}"
}
# Send the IsWindows property if the environment looks like Windows
if [[ "$(uname -s)" =~ ^CYGWIN*|MINGW*|MSYS* ]]; then
builtin printf '\e]633;P;IsWindows=True\a'
__vsc_is_windows=1
else
__vsc_is_windows=0
fi
# Allow verifying $BASH_COMMAND doesn't have aliases resolved via history when the right HISTCONTROL
# configuration is used
if [[ "$HISTCONTROL" =~ .*(erasedups|ignoreboth|ignoredups).* ]]; then
__vsc_history_verify=0
else
__vsc_history_verify=1
fi
__vsc_initialized=0
__vsc_original_PS1="$PS1"
__vsc_original_PS2="$PS2"
__vsc_custom_PS1=""
__vsc_custom_PS2=""
__vsc_in_command_execution="1"
__vsc_current_command=""
# It's fine for this to be in the global scope, as getting at it requires access to the shell environment
__vsc_nonce="$VSCODE_NONCE"
unset VSCODE_NONCE
# Report continuation prompt
builtin printf "\e]633;P;ContinuationPrompt=$(echo "$PS2" | sed 's/\x1b/\\\\x1b/g')\a"
__vsc_report_prompt() {
# Expand the original PS1 similarly to how bash would normally
# See https://stackoverflow.com/a/37137981 for technique
if ((BASH_VERSINFO[0] >= 5 || (BASH_VERSINFO[0] == 4 && BASH_VERSINFO[1] >= 4))); then
__vsc_prompt=${__vsc_original_PS1@P}
else
__vsc_prompt=${__vsc_original_PS1}
fi
__vsc_prompt="$(builtin printf "%s" "${__vsc_prompt//[$'\001'$'\002']}")"
builtin printf "\e]633;P;Prompt=%s\a" "$(__vsc_escape_value "${__vsc_prompt}")"
}
__vsc_prompt_start() {
builtin printf '\e]633;A\a'
}
__vsc_prompt_end() {
builtin printf '\e]633;B\a'
}
__vsc_update_cwd() {
if [ "$__vsc_is_windows" = "1" ]; then
__vsc_cwd="$(cygpath -m "$PWD")"
else
__vsc_cwd="$PWD"
fi
builtin printf '\e]633;P;Cwd=%s\a' "$(__vsc_escape_value "$__vsc_cwd")"
}
__vsc_command_output_start() {
if [[ -z "$__vsc_first_prompt" ]]; then
builtin return
fi
builtin printf '\e]633;E;%s;%s\a' "$(__vsc_escape_value "${__vsc_current_command}")" $__vsc_nonce
builtin printf '\e]633;C\a'
}
__vsc_continuation_start() {
builtin printf '\e]633;F\a'
}
__vsc_continuation_end() {
builtin printf '\e]633;G\a'
}
__vsc_command_complete() {
if [[ -z "$__vsc_first_prompt" ]]; then
builtin return
fi
if [ "$__vsc_current_command" = "" ]; then
builtin printf '\e]633;D\a'
else
builtin printf '\e]633;D;%s\a' "$__vsc_status"
fi
__vsc_update_cwd
}
__vsc_update_prompt() {
# in command execution
if [ "$__vsc_in_command_execution" = "1" ]; then
# Wrap the prompt if it is not yet wrapped; if the PS1 changed since this was last set, it
# means the user re-exported the PS1, so we should re-wrap it
if [[ "$__vsc_custom_PS1" == "" || "$__vsc_custom_PS1" != "$PS1" ]]; then
__vsc_original_PS1=$PS1
__vsc_custom_PS1="\[$(__vsc_prompt_start)\]$__vsc_original_PS1\[$(__vsc_prompt_end)\]"
PS1="$__vsc_custom_PS1"
fi
if [[ "$__vsc_custom_PS2" == "" || "$__vsc_custom_PS2" != "$PS2" ]]; then
__vsc_original_PS2=$PS2
__vsc_custom_PS2="\[$(__vsc_continuation_start)\]$__vsc_original_PS2\[$(__vsc_continuation_end)\]"
PS2="$__vsc_custom_PS2"
fi
__vsc_in_command_execution="0"
fi
}
__vsc_precmd() {
__vsc_command_complete "$__vsc_status"
__vsc_current_command=""
__vsc_report_prompt
__vsc_first_prompt=1
__vsc_update_prompt
}
__vsc_preexec() {
__vsc_initialized=1
if [[ ! $BASH_COMMAND == __vsc_prompt* ]]; then
# Use history if it's available to verify the command as BASH_COMMAND comes in with aliases
# resolved
if [ "$__vsc_history_verify" = "1" ]; then
__vsc_current_command="$(builtin history 1 | sed 's/ *[0-9]* *//')"
else
__vsc_current_command=$BASH_COMMAND
fi
else
__vsc_current_command=""
fi
__vsc_command_output_start
}
# Debug trapping/preexec inspired by starship (ISC)
if [[ -n "${bash_preexec_imported:-}" ]]; then
__vsc_preexec_only() {
if [ "$__vsc_in_command_execution" = "0" ]; then
__vsc_in_command_execution="1"
__vsc_preexec
fi
}
precmd_functions+=(__vsc_prompt_cmd)
preexec_functions+=(__vsc_preexec_only)
else
__vsc_dbg_trap="$(__vsc_get_trap DEBUG)"
if [[ -z "$__vsc_dbg_trap" ]]; then
__vsc_preexec_only() {
if [ "$__vsc_in_command_execution" = "0" ]; then
__vsc_in_command_execution="1"
__vsc_preexec
fi
}
trap '__vsc_preexec_only "$_"' DEBUG
elif [[ "$__vsc_dbg_trap" != '__vsc_preexec "$_"' && "$__vsc_dbg_trap" != '__vsc_preexec_all "$_"' ]]; then
__vsc_preexec_all() {
if [ "$__vsc_in_command_execution" = "0" ]; then
__vsc_in_command_execution="1"
__vsc_preexec
builtin eval "${__vsc_dbg_trap}"
fi
}
trap '__vsc_preexec_all "$_"' DEBUG
fi
fi
__vsc_update_prompt
__vsc_restore_exit_code() {
return "$1"
}
__vsc_prompt_cmd_original() {
__vsc_status="$?"
__vsc_restore_exit_code "${__vsc_status}"
# Evaluate the original PROMPT_COMMAND similarly to how bash would normally
# See https://unix.stackexchange.com/a/672843 for technique
local cmd
for cmd in "${__vsc_original_prompt_command[@]}"; do
eval "${cmd:-}"
done
__vsc_precmd
}
__vsc_prompt_cmd() {
__vsc_status="$?"
__vsc_precmd
}
# PROMPT_COMMAND arrays and strings seem to be handled the same (handling only the first entry of
# the array?)
__vsc_original_prompt_command=${PROMPT_COMMAND:-}
if [[ -z "${bash_preexec_imported:-}" ]]; then
if [[ -n "${__vsc_original_prompt_command:-}" && "${__vsc_original_prompt_command:-}" != "__vsc_prompt_cmd" ]]; then
PROMPT_COMMAND=__vsc_prompt_cmd_original
else
PROMPT_COMMAND=__vsc_prompt_cmd
fi
fi


@@ -0,0 +1,90 @@
{
"models": [
{
"title": "Ollama",
"provider": "ollama",
"model": "AUTODETECT"
}
],
"slashCommands": [
{
"name": "edit",
"description": "Edit selected code"
},
{
"name": "comment",
"description": "Write comments for the selected code"
},
{
"name": "share",
"description": "Export this session as markdown"
},
{
"name": "cmd",
"description": "Generate a shell command"
}
],
"customCommands": [
{
"name": "test",
"prompt": "Write a comprehensive set of unit tests for the selected code. It should setup, run tests that check for correctness including important edge cases, and teardown. Ensure that the tests are complete and sophisticated. Give the tests just as chat output, don't edit any file.",
"description": "Write unit tests for highlighted code"
}
],
"contextProviders": [
{
"name": "code",
"params": {}
},
{
"name": "docs",
"params": {}
},
{
"name": "diff",
"params": {}
},
{
"name": "open",
"params": {}
},
{
"name": "terminal",
"params": {}
},
{
"name": "problems",
"params": {}
},
{
"name": "folder",
"params": {}
},
{
"name": "codebase",
"params": {}
}
],
"tabAutocompleteModel": {
"title": "Starcoder-2-3b",
"provider": "ollama",
"apiBase": "http://localhost:11434",
"model": "codecomplete:3b",
"completionOptions": {
"stop": ["<file_sep>"],
"maxTokens": 600
}
},
"tabAutocompleteOptions": {
"maxPromptTokens": 16000,
"useCopyBuffer": true,
"useSuffix": true,
"useOtherFiles": true
},
"allowAnonymousTelemetry": true,
"embeddingsProvider": {
"provider": "ollama",
"model": "embed:335m",
"apiBase": "http://localhost:11434"
}
}


@@ -0,0 +1,65 @@
{
"explorer.confirmDelete": false,
"git.autofetch": true,
"git.confirmSync": false,
"security.workspace.trust.untrustedFiles": "open",
"terminal.integrated.defaultProfile.linux": "bash",
"terminal.integrated.profiles.linux": {
"bash": {
"path": "/usr/bin/zsh",
"args": ["-l"],
"overrideName": true
},
"JavaScript Debug Terminal": null
},
"python.diagnostics.sourceMapsEnabled": true,
"tabnine.experimentalAutoImports": true,
"tabnine.disableFileRegex": [
],
"git.enableSmartCommit": true,
"workbench.startupEditor": "none",
"continue.enableTabAutocomplete": false,
"explorer.confirmDragAndDrop": false,
"workbench.colorCustomizations": {
"focusBorder": "#31363B",
"foreground": "#EFF0F1",
"disabledForeground": "#EFF0F1",
"widget.border": "#00000000",
"widget.shadow": "#00000000",
"selection.background": "#6992C1",
"descriptionForeground": "#EFF0F1",
"errorForeground": "#da4453",
"icon.foreground": "#EFF0F1",
"sash.hoverBorder": "#6992C1",
"window.activeBorder": "#292D32",
"window.inactiveBorder": "#292D32",
"textBlockQuote.background": "#292D32",
"textCodeBlock.background": "#292D32",
"textLink.activeForeground": "#EFF0F1",
"textLink.foreground": "#EFF0F1",
"toolbar.hoverBackground": "#31363B",
"toolbar.hoverOutline": "#6992C1",
"toolbar.activeBackground": "#6992C1",
"sideBar.background": "#292D32",
"editor.background": "#222528",
"sideBarSectionHeader.background": "#292D32",
"list.activeSelectionBackground": "#6992C1",
"list.activeSelectionForeground": "#EFF0F1",
"list.focusBackground": "#1E485D",
"list.hoverBackground": "#1E485D",
"list.hoverForeground": "#EFF0F1",
"activityBar.background": "#292D32",
"editorGroup.border": "#EFF0F1",
"editorGroup.dropBackground": "#404345",
"editorGroupHeader.noTabsBackground": "#292D32",
"editorGroupHeader.tabsBackground": "#292D32",
"editorGroupHeader.tabsBorder": "#292D32",
"tab.activeBackground": "#31363B",
"tab.inactiveBackground": "#292D32",
"tab.hoverBackground": "#404345",
"statusBar.background": "#292D32"
},
"terminal.external.linuxExec": "konsole",
"terminal.explorerKind": "both",
"window.confirmSaveUntitledWorkspace": false
}


@@ -0,0 +1,27 @@
# Rename this file to 70-wooting.rules and place under /etc/udev/rules.d/
# Wooting One Legacy
SUBSYSTEM=="hidraw", ATTRS{idVendor}=="03eb", ATTRS{idProduct}=="ff01", TAG+="uaccess"
SUBSYSTEM=="usb", ATTRS{idVendor}=="03eb", ATTRS{idProduct}=="ff01", TAG+="uaccess"
# Wooting One update mode
SUBSYSTEM=="hidraw", ATTRS{idVendor}=="03eb", ATTRS{idProduct}=="2402", TAG+="uaccess"
# Wooting Two Legacy
SUBSYSTEM=="hidraw", ATTRS{idVendor}=="03eb", ATTRS{idProduct}=="ff02", TAG+="uaccess"
SUBSYSTEM=="usb", ATTRS{idVendor}=="03eb", ATTRS{idProduct}=="ff02", TAG+="uaccess"
# Wooting Two update mode
SUBSYSTEM=="hidraw", ATTRS{idVendor}=="03eb", ATTRS{idProduct}=="2403", TAG+="uaccess"
# Generic Wootings
SUBSYSTEM=="hidraw", ATTRS{idVendor}=="31e3", TAG+="uaccess"
SUBSYSTEM=="usb", ATTRS{idVendor}=="31e3", TAG+="uaccess"


@@ -0,0 +1,69 @@
# Run a big copy operation with Rsync
Run the following as root (or with `sudo`):
`rsync -avhW $FROM_DIR $TO_DIR > ~/copy.tmp`
- `-a` Archive mode (recursive, copy symlinks, preserve permissions, preserve modification times, preserve group, preserve owner, preserve device files).
- `-v` Verbose mode (print path of each file copied).
- `-h` Human readable (format numbers to be human-readable).
- `-W` Whole file (copy files whole, do not use transfer delta algorithm).
- `$FROM_DIR` Source directory, with trailing slash. E.g. `/mnt/Media/Media/`
- `$TO_DIR` Destination directory, with trailing slash. E.g. `/mnt/TEMP/Media/`
- `> ~/copy.tmp` Sends stdout to a file which can be used for scripting.
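E.g., using the example directories from the list above: `sudo rsync -avhW /mnt/Media/Media/ /mnt/TEMP/Media/ > ~/copy.tmp`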
# Send a brief email notification when the copy completes
```
( echo "Subject: Copy $FROM_DIR to $TO_DIR operation complete."
echo "Mime-Version: 1.0"
echo "Content-Type: multipart/mixed; boundary=\"d29a0c638b540b23e9a29a3a9aebc900aeeb6a82\""
echo "Content-Transfer-Encoding: 7bit"
echo ""
echo "--d29a0c638b540b23e9a29a3a9aebc900aeeb6a82"
echo "Content-Type: text/html; charset=\"UTF-8\""
echo "Content-Transfer-Encoding: 7bit"
echo "Content-Disposition: inline"
echo ""
echo "Copy $FROM_DIR to $TO_DIR complete. See attached log for details about what was copied."
echo ""
echo "--d29a0c638b540b23e9a29a3a9aebc900aeeb6a82"
echo "Content-Type: text/plain"
echo "Content-Transfer-Encoding: base64"
echo "Content-Disposition: attachment; filename=\"log.txt\""
echo ""
base64 "copy.tmp"
echo "--d29a0c638b540b23e9a29a3a9aebc900aeeb6a82--"
) | sendmail root
```
# Send a verbose email notification when the copy completes
This should be set to run after the copy completes (e.g. in a script, or with an `&&`).
```
if [ $(ls -la ~/copy.tmp | awk '{print $5}') -le 23000000 ]; then
( echo "Subject: Copy $FROM_DIR to $TO_DIR operation complete."
echo "Mime-Version: 1.0"
echo "Content-Type: multipart/mixed; boundary=\"d29a0c638b540b23e9a29a3a9aebc900aeeb6a82\""
echo "Content-Transfer-Encoding: 7bit"
echo ""
echo "--d29a0c638b540b23e9a29a3a9aebc900aeeb6a82"
echo "Content-Type: text/html; charset=\"UTF-8\""
echo "Content-Transfer-Encoding: 7bit"
echo "Content-Disposition: inline"
echo ""
echo "Copy $FROM_DIR to $TO_DIR complete. See attached log for details about what was copied."
echo ""
echo "--d29a0c638b540b23e9a29a3a9aebc900aeeb6a82"
echo "Content-Type: text/plain"
echo "Content-Transfer-Encoding: base64"
echo "Content-Disposition: attachment; filename=\"log.txt\""
echo ""
base64 "copy.tmp"
echo "--d29a0c638b540b23e9a29a3a9aebc900aeeb6a82--"
) | sendmail root
rm /tmp/copy.tmp
else
echo "Filesize too large to attach. See log file for details." | mail -s "Copy $FROM_DIR to $TO_DIR operation complete." root
mv copy.tmp copy_$(date +"%y-%m-%d").log
fi
```
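To chain the copy and notification together (per the note above), a sketch assuming the verbose block is saved as an executable script at `~/notify.sh` (hypothetical name), and that the variables are exported so the script can see them:
```
export FROM_DIR=/mnt/Media/Media/ TO_DIR=/mnt/TEMP/Media/
rsync -avhW "$FROM_DIR" "$TO_DIR" > ~/copy.tmp && ~/notify.sh
```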


@@ -0,0 +1,149 @@
We can get each drive's serial number, power on hours, and manufacture date with the following one-liner:
```
for dev in {b..y}; \
do echo "### /dev/sd$dev" && \
sudo smartctl -a /dev/sd$dev | awk '/Serial number:/{serial=$NF} /hours:minutes/{powerontime=$NF} {FS="\n"}/Manufactured in /{manufacture=$NF}END{print serial; print powerontime; print manufacture; printf "\n"}'; \
done
```
Make sure to update this list *only* when a disk is newly installed. Also note the date of installation for the disk.
Or, if you're only updating one drive:
1. Set the `$dev` variable to the drive letter you want to check (e.g. for `/dev/sdr`, use `dev=r`)
2. Run the one-liner: `sudo smartctl -a /dev/sd$dev | awk '/Serial number:/{serial=$NF} /hours:minutes/{powerontime=$NF} {FS="\n"}/Manufactured in /{manufacture=$NF}END{print serial; print powerontime; print manufacture; printf "\n"}'`
## Disk Info
### /dev/sdb
Serial number: VJGPS30X
Accumulated power on time, hours:minutes 48962:49
Manufactured in week 10 of year 2017
### /dev/sdc
Serial number: VK0ZD6ZY
Accumulated power on time, hours:minutes 32709:42
Manufactured in week 03 of year 2017
### /dev/sdd (INSTALLED 2023/07/20)
Serial number: VKJWPAEX
Accumulated power on time, hours:minutes 44760:00
Manufactured in week 22 of year 2016
### /dev/sde
Serial number: VJG2PVRX
Accumulated power on time, hours:minutes 47505:55
Manufactured in week 36 of year 2016
### /dev/sdf
Serial number: VJGR6TNX
Accumulated power on time, hours:minutes 48957:25
Manufactured in week 10 of year 2017
### /dev/sdg
Serial number: 2EG14YNJ
Accumulated power on time, hours:minutes 32640:40
Manufactured in week 49 of year 2014
### /dev/sdh (INSTALLED 2023/06/25)
Serial number: VJGJVTZX
Accumulated power on time, hours:minutes 35808:32
Manufactured in week 07 of year 2017
### /dev/sdi
Serial number: VJG1H9UX
Accumulated power on time, hours:minutes 47504:12
Manufactured in week 33 of year 2016
### /dev/sdj (INSTALLED 2023/06/24)
Serial number: VJGJUWNX
Accumulated power on time, hours:minutes 35913:53
Manufactured in week 07 of year 2017
### /dev/sdk
Serial number: 2EGXD27V
Accumulated power on time, hours:minutes 35390:13
Manufactured in week 44 of year 2015
### /dev/sdl (INSTALLED 2023/06/25)
Serial number: VJGJAS1X
Accumulated power on time, hours:minutes 35811:54
Manufactured in week 07 of year 2017
### /dev/sdm
Serial number: VJG2UTUX
Accumulated power on time, hours:minutes 47569:09
Manufactured in week 36 of year 2016
### /dev/sdn
Serial number: VJGRGD2X
Accumulated power on time, hours:minutes 49043:40
Manufactured in week 10 of year 2017
### /dev/sdo
Serial number: 001526PL8AVV 2EGL8AVV
Accumulated power on time, hours:minutes 55129:17
Manufactured in week 26 of year 2015
### /dev/sdp
Serial number: 2EKA903X
Accumulated power on time, hours:minutes 45174:35
Manufactured in week 53 of year 2015
### /dev/sdq
Serial number: VJGRRG9X
Accumulated power on time, hours:minutes 49911:22
Manufactured in week 10 of year 2017
### /dev/sdr
Serial number: VKH40L6X
Accumulated power on time, hours:minutes 46115:13
Manufactured in week 10 of year 2016
### /dev/sdr (INSTALLED 2023/07/19)
Serial number: VJGK56KX
Accumulated power on time, hours:minutes 35870:47
Manufactured in week 07 of year 2017
### /dev/sds
Serial number: 001528PNPVWV 2EGNPVWV
Accumulated power on time, hours:minutes 23197:56
Manufactured in week 28 of year 2015
### /dev/sdt
Serial number: 2EKATR2X
Accumulated power on time, hours:minutes 45173:20
Manufactured in week 53 of year 2015
### /dev/sdu
Serial number: VKH3Y3XX
Accumulated power on time, hours:minutes 57672:16
Manufactured in week 10 of year 2016
### /dev/sdv
Serial number: 001703PV9N8V VLKV9N8V
Accumulated power on time, hours:minutes 51699:11
Manufactured in week 03 of year 2017
### /dev/sdw
Serial number: 001708P4W2VV R5G4W2VV
Accumulated power on time, hours:minutes 26289:03
Manufactured in week 08 of year 2017
### /dev/sdx
Serial number: 2EKA92XX
Accumulated power on time, hours:minutes 45175:01
Manufactured in week 53 of year 2015
### /dev/sdy
Serial number: VKGW5YGX
Accumulated power on time, hours:minutes 57740:50
Manufactured in week 09 of year 2016
## Gettin Graphic
```
for dev in {b..y}; \
do smartctl -a /dev/sd$dev | awk '/Serial number:/{serial=$NF} /hours:minutes/{powerontime=$NF} /Manufactured in /{manufacture=$NF}END{print serial; print powerontime; print manufacture; printf "\n"}'; \
done
```


@@ -0,0 +1,63 @@
# Physical Disk Locations (DS4243)
*Updated 2024/02/28*
Each cell contains the serial number for the drive in the mapped bay.
| | X1 | X2 | X3 | X4 |
|:--:|:---------:|:--------:|:--------:|:--------:|
| Y1 | VJGPS30X | VK0ZD6ZY | VKH22XPX | VJG2PVRX |
| Y2 | VJGR6TNX | 2EG14YNJ | VJGJVTZX | VJG1H9UX |
| Y3 | VJGJUWNX | 2EGXD27V | VJGJAS1X | VJG2UTUX |
| Y4 | VJGRGD2X | 2EGL8AVV | 2EKA903X | VJGRRG9X |
| Y5 | VJGK56KX | 2EGNPVWV | VJG1NP9X | VKH3Y3XX |
| Y6 | VLKV9N8V | R5G4W2VV | VLKXPS1V | VKGW5YGX |
# Identify a Failing Disk
Disk SMART test errors are reported by device ID (e.g. /dev/sdw), rather than the serial number. To find the serial number associated with a particular device ID, run the following one-liner with `$dev` substituted for the device to find:
`TODO`
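A minimal sketch in the meantime, assuming `$dev` holds the device letter (e.g. `dev=w` for `/dev/sdw`): `sudo smartctl -a /dev/sd$dev | grep Serial | tr -s ' ' | cut -d' ' -f 3`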
# Get Serial Number from part-uuid
`ls -l /dev/disk/by-partuuid`
This returns a line for each partition device showing its mapping to a `/dev/sd` Linux block device.
From there, run `smartctl -a <block device> | grep Serial` where `<block device>` is like `/dev/sdw`.
Or, as a one-liner with `$DISK_UUID` set to the UUID to find:
`ls -l /dev/disk/by-partuuid | grep $DISK_UUID | cut -d' ' -f 11 | xargs basename | sed 's/^/\/dev\//' | xargs sudo smartctl -a | grep Serial | tr -s ' ' | cut -d' ' -f 3`
It might be possible to pull the part UUID from the `zpool status` command directly. An exercise for the reader.
# Offline and wipe the failing disk
0. Match the disk name (e.g. `/dev/sdw`) to the UUID (e.g. `13846695584571018356`). Use `lsblk --fs` for this.
1. Offline the disk: `zpool offline $pool $disk_id`
2. Wipe the disk: `wipefs -a $disklabel` (where `$disklabel` is like `/dev/sdw`). Note: without `-a`, `wipefs` only lists signatures; it doesn't erase them.
3. Run `lsblk --fs` again to verify the wipe worked. If not, you'll need to run a full dd wipe with `dd if=/dev/zero of=$disklabel bs=1M`. This will take a long time as it writes zeroes across the entire drive.
4. Physically remove the disk.
# Replace Disk in Pool
Once the failed disk has been identified and physically replaced, you should know the old drive's UUID (via `zpool status`) and the new drive's device name (via `lsblk` and deduction).
Once the new drive is in place and you know its ID (e.g. `/dev/sdw`), run the following to begin the resilver process:
`zpool replace <pool> <part-uuid to be replaced> <device id of new drive>`
E.g. `zpool replace Media d50abb30-81fd-49c6-b22e-43fcee2022fe /dev/sdx`
This will begin a new resilver operation. Good luck!
https://docs.oracle.com/cd/E19253-01/819-5461/gazgd/index.html
# Update Log
**Most recent first**
- *2024/10/21*: Replaced 2EKATR2X with VJG1NP9X at Y5/X3
- *2024/09/09*: Replaced VLKV9N8V with VKH3XR2X at Y6/X1
- *2024/05/26*: Replaced 2EGL8AVV with VJG2808X at Y4/X2
- *2024/04/16*: Replaced VJG1H9UX with 2EKA92XX at Y2/X4
- *2024/04/07*: Replaced VJG282NX with VKH22XPX at Y1/X3
- *2024/03/12*: Replaced VLKXPS1V with VKH40L6X at Y6/X3
- *2024/02/28*: Replaced 2EKA92XX with VLKXPS1V at Y6/X3
- *2024/02/27*: Replaced VJG2T4YX with VJG282NX at Y2/X3


@@ -0,0 +1,451 @@
# Steps Taken
We hooked up 8 "spare" drives as a janky temporary pool. They're literally stacked 4-high on top of the chassis. It's awful. We created a RAID-Z2 pool called `TEMP` with these drives. We then copied all data off the `Media` pool with `sudo rsync -avhW /mnt/Media/Media/ /mnt/TEMP/Media/`. It was interrupted once when the PC running the SSH session experienced a crash to black. We reconnected the session and ran the same command. It resumed just fine. The copy completed in around 48 hours with interruptions accounted for. Reported copy speed was 285MB/s.
Next we destroy the `Media` pool. [TrueNAS docs](https://www.truenas.com/docs/scale/scaletutorials/storage/managepoolsscale/#exporting/disconnecting-or-deleting-a-pool). We open the Storage Dashboard and click "Export/Disconnect" for the `Media` pool. Before confirming, we take note of the services that will be disrupted by the deletion (to be recreated later):
```
These services depend on pool Media and will be disrupted if the pool is detached:
SMB Share:
Media
AV
Snapshot Task:
Media/AV
Media/Media
Rsync Task:
/mnt/Media/Media/Video/HomeVideos
/mnt/Media/Media/Images/
/mnt/Media/Media/Video/Recordings/
```
We check all three boxes for destroying the data, deleting the share configurations, and to confirm the export/disconnect. Type the pool name into the confirm box, and hit the big red button.
At this point my exhaustion 48 hours ago bit me in the ass. When I created the TEMP pool, I thoughtlessly added the two drives which had been removed from Media to create a 10-wide RAID-Z2. Okay, so we've got a rough situation on our hands. I see two possibilities:
1. Offline the two misplaced drives from TEMP, create a 12-wide Media pool, and begin the copy. Life is for the living.
2. Just order a couple more drives on Ebay, replace the two misplaced drives, resilver, and continue as planned.
I carefully calculated that the number of times you live is once, so we're flying by the seat of our pants.
We identify which drives are in the wrong pool by running `sudo zpool status TEMP`, finding each part-uuid in the `/dev/disk/by-partuuid` directory, where it's symlinked to a standard Linux partition name (e.g. `/dev/sda1`). From there, we run `smartctl -a` against the device name and filter to get the serial number. Then we check each serial number against the table in [diskshelfmap](DISKSHELFMAP.md). I wrote a one-liner.
```sh
for id in $(sudo zpool status TEMP | grep -E "[0-9a-fA-F]{8}\b-[0-9a-fA-F]{4}\b-[0-9a-fA-F]{4}\b-[0-9a-fA-F]{4}\b-[0-9a-fA-F]{12} (ONLINE|DEGRADED)" | tr -s ' ' | cut -d' ' -f 2); do
echo -n "$id -> ";
ls -l /dev/disk/by-partuuid |\
grep $id |\
cut -d' ' -f 12 |\
cut -d'/' -f 3 |\
sed 's/^/\/dev\//' |\
xargs sudo smartctl -a |\
grep Serial |\
tr -s ' ' |\
cut -d' ' -f 3
done
```
```sh
for id in $(sudo zpool status TEMP | grep -E "[0-9a-fA-F]{8}\b-[0-9a-fA-F]{4}\b-[0-9a-fA-F]{4}\b-[0-9a-fA-F]{4}\b-[0-9a-fA-F]{12} (ONLINE|DEGRADED)" | tr -s ' ' | cut -d' ' -f 2); do echo -n "$id -> "; ls -l /dev/disk/by-partuuid | grep $id | cut -d' ' -f 12 | cut -d'/' -f 3 | sed 's/^/\/dev\//' | xargs sudo smartctl -a | grep Serial | tr -s ' ' | cut -d' ' -f 3; done
```
Output:
```
dad98d96-3cbe-469e-b262-b8416dfc72ec -> 2EKA92XX
0fabfe6b-5305-4711-921c-926110df24b7 -> VJG282NX
864e8c2d-0925-4018-b440-81807d3c5c9a -> VJG2T4YX
d7b9a2ec-5f26-4649-a7fb-cb1ae953825e -> VKH3XR2X
dd453900-d8c0-430d-bc1f-c022e62417ae -> 001703PXPS1V
507666cd-91e2-4960-af02-b15899a22487 -> VJG1NP9X
5eaf90b6-0ad1-4ec0-a232-d704c93dae9a -> VKH40L6X
cf9cc737-a704-4bea-bcee-db2cfe4490b7 -> VJG2808X
50cc36be-001d-4e00-a0ca-e4b557bd6852 -> VKHNH0GX
142d45e8-4f30-4492-b01f-f22cba129fee -> VKJWPAEX
```
At the time of the error, our disk shelf map looks like:
| | X1 | X2 | X3 | X4 |
|:--:|:---------:|:--------:|:--------:|:--------:|
| Y1 | VJGPS30X | VK0ZD6ZY | VJG282NX | VJG2PVRX |
| Y2 | VJGR6TNX | 2EG14YNJ | VJGJVTZX | VJG1H9UX |
| Y3 | VJGJUWNX | 2EGXD27V | VJGJAS1X | VJG2UTUX |
| Y4 | VJGRGD2X | 2EGL8AVV | 2EKA903X | VJGRRG9X |
| Y5 | VJGK56KX | 2EGNPVWV | 2EKATR2X | VKH3Y3XX |
| Y6 | VLKV9N8V | R5G4W2VV | VLKXPS1V | VKGW5YGX |
So our matches are:
- Serial: `VJG282NX`, partuuid: `0fabfe6b-5305-4711-921c-926110df24b7`, shelf coordinates: Y1/X3
Hmm. That's it? Something's unaccounted for. Also one of those drives is weird. Let's check the full `smartctl` output for that drive.
`id=dd453900-d8c0-430d-bc1f-c022e62417ae; ls -l /dev/disk/by-partuuid | grep $id | cut -d' ' -f 11 | xargs basename | sed 's/^/\/dev\//' | xargs sudo smartctl -a` returns a normal-looking output.
Except the serial is a little weird.
```
Serial number: 001703PXPS1V VLKXPS1V
```
Huh. Interesting. That maps to our Y6/X3 serial. I wonder how that happened.
So our *actual* matches are:
- Serial: `VJG282NX`, partuuid: `0fabfe6b-5305-4711-921c-926110df24b7`, shelf coordinates: Y1/X3
- Serial: `VLKXPS1V`, partuuid: `dd453900-d8c0-430d-bc1f-c022e62417ae`, shelf coordinates: Y6/X3
So we have our two misplaced drives. Just as a sanity check, we'll physically remove each drive (one at a time) and make sure the correct devices are disappearing from the pool.
We remove the drive at Y1/X3, then run `zpool status TEMP` and we see `0fabfe6b-5305-4711-921c-926110df24b7 REMOVED`. That matches. Cool. Plug it back in and wait for it to go back to ONLINE status.
Next we remove the drive at Y6/X3. Same test, `zpool status TEMP` which contains `dd453900-d8c0-430d-bc1f-c022e62417ae REMOVED`. Dope. All looking good. Plug it back in and wait for return to normal.
We wait a few seconds for the drive to come back online. We check `zpool status TEMP` and we see something we weren't expecting:
```
pool: TEMP
state: ONLINE
status: One or more devices is currently being resilvered. The pool will
continue to function, possibly in a degraded state.
action: Wait for the resilver to complete.
scan: resilver in progress since Fri Mar 1 22:47:28 2024
0B scanned at 0B/s, 0B issued at 0B/s, 58.1T total
0B resilvered, 0.00% done, no estimated completion time
config:
NAME STATE READ WRITE CKSUM
TEMP ONLINE 0 0 0
raidz2-0 ONLINE 0 0 0
dad98d96-3cbe-469e-b262-b8416dfc72ec ONLINE 0 0 0
0fabfe6b-5305-4711-921c-926110df24b7 ONLINE 0 0 0
864e8c2d-0925-4018-b440-81807d3c5c9a ONLINE 0 0 0
d7b9a2ec-5f26-4649-a7fb-cb1ae953825e ONLINE 0 0 0
dd453900-d8c0-430d-bc1f-c022e62417ae ONLINE 0 0 0 (awaiting resilver)
507666cd-91e2-4960-af02-b15899a22487 ONLINE 0 0 0
5eaf90b6-0ad1-4ec0-a232-d704c93dae9a ONLINE 0 0 0
cf9cc737-a704-4bea-bcee-db2cfe4490b7 ONLINE 0 0 0
50cc36be-001d-4e00-a0ca-e4b557bd6852 ONLINE 0 0 0
142d45e8-4f30-4492-b01f-f22cba129fee ONLINE 0 0 0
errors: No known data errors
```
It gets weirder though. A few minutes pass and we get `zpool status TEMP`:
```
pool: TEMP
state: ONLINE
status: One or more devices is currently being resilvered. The pool will
continue to function, possibly in a degraded state.
action: Wait for the resilver to complete.
scan: resilver in progress since Fri Mar 1 22:51:23 2024
0B scanned at 0B/s, 0B issued at 0B/s, 58.1T total
0B resilvered, 0.00% done, no estimated completion time
config:
NAME STATE READ WRITE CKSUM
TEMP ONLINE 0 0 0
raidz2-0 ONLINE 0 0 0
dad98d96-3cbe-469e-b262-b8416dfc72ec ONLINE 0 0 0
0fabfe6b-5305-4711-921c-926110df24b7 ONLINE 0 0 0
864e8c2d-0925-4018-b440-81807d3c5c9a ONLINE 0 0 0
d7b9a2ec-5f26-4649-a7fb-cb1ae953825e ONLINE 0 0 0
dd453900-d8c0-430d-bc1f-c022e62417ae ONLINE 0 0 0
507666cd-91e2-4960-af02-b15899a22487 ONLINE 0 0 0
5eaf90b6-0ad1-4ec0-a232-d704c93dae9a ONLINE 0 0 0
cf9cc737-a704-4bea-bcee-db2cfe4490b7 ONLINE 0 0 0
50cc36be-001d-4e00-a0ca-e4b557bd6852 ONLINE 0 0 0
142d45e8-4f30-4492-b01f-f22cba129fee ONLINE 0 0 0
errors: No known data errors
```
No drives awaiting resilver. But the resilver claims to be in progress with zero bytes scanned.
A quick `zpool clear TEMP` doesn't change anything. Same with `zpool resilver TEMP`. We'll give a reboot a shot.
Huh, alright.
```
pool: TEMP
state: ONLINE
scan: resilvered 1.18M in 00:07:53 with 0 errors on Fri Mar 1 22:59:16 2024
config:
NAME STATE READ WRITE CKSUM
TEMP ONLINE 0 0 0
raidz2-0 ONLINE 0 0 0
dad98d96-3cbe-469e-b262-b8416dfc72ec ONLINE 0 0 0
0fabfe6b-5305-4711-921c-926110df24b7 ONLINE 0 0 0
864e8c2d-0925-4018-b440-81807d3c5c9a ONLINE 0 0 0
d7b9a2ec-5f26-4649-a7fb-cb1ae953825e ONLINE 0 0 0
dd453900-d8c0-430d-bc1f-c022e62417ae ONLINE 0 0 0
507666cd-91e2-4960-af02-b15899a22487 ONLINE 0 0 0
5eaf90b6-0ad1-4ec0-a232-d704c93dae9a ONLINE 0 0 0
cf9cc737-a704-4bea-bcee-db2cfe4490b7 ONLINE 0 0 0
50cc36be-001d-4e00-a0ca-e4b557bd6852 ONLINE 0 0 0
142d45e8-4f30-4492-b01f-f22cba129fee ONLINE 0 0 0
errors: No known data errors
```
Okay, now we need to offline those two drives to make them available to use in the new Media pool.
We could follow the [TrueNAS docs](https://www.truenas.com/docs/core/coretutorials/storage/disks/diskreplace/#taking-a-failed-disk-offline) web UI instructions, but I prefer the CLI. So instead we'll reference [Oracle's docs](https://docs.oracle.com/cd/E19253-01/819-5461/gazgm/index.html). So we have two commands to run:
- `zpool offline TEMP 0fabfe6b-5305-4711-921c-926110df24b7`
- `zpool offline TEMP dd453900-d8c0-430d-bc1f-c022e62417ae`
And as expected we're now in a degraded state.
```
pool: TEMP
state: DEGRADED
status: One or more devices has been taken offline by the administrator.
Sufficient replicas exist for the pool to continue functioning in a
degraded state.
action: Online the device using 'zpool online' or replace the device with
'zpool replace'.
scan: resilvered 1.18M in 00:07:53 with 0 errors on Fri Mar 1 22:59:16 2024
config:
NAME STATE READ WRITE CKSUM
TEMP DEGRADED 0 0 0
raidz2-0 DEGRADED 0 0 0
dad98d96-3cbe-469e-b262-b8416dfc72ec ONLINE 0 0 0
0fabfe6b-5305-4711-921c-926110df24b7 OFFLINE 0 0 0
864e8c2d-0925-4018-b440-81807d3c5c9a ONLINE 0 0 0
d7b9a2ec-5f26-4649-a7fb-cb1ae953825e ONLINE 0 0 0
dd453900-d8c0-430d-bc1f-c022e62417ae OFFLINE 0 0 0
507666cd-91e2-4960-af02-b15899a22487 ONLINE 0 0 0
5eaf90b6-0ad1-4ec0-a232-d704c93dae9a ONLINE 0 0 0
cf9cc737-a704-4bea-bcee-db2cfe4490b7 ONLINE 0 0 0
50cc36be-001d-4e00-a0ca-e4b557bd6852 ONLINE 0 0 0
142d45e8-4f30-4492-b01f-f22cba129fee ONLINE 0 0 0
errors: No known data errors
```
Now if I did my homework properly, we should be able to build a new pool which contains the offlined drives. And sure enough the web UI corroborates.
We navigate to the Storage page, then click "Create Pool". We add all available drives (`sdd sdf sdh sdj sdl sdp sdq sdr sdt sdu sdx sdy`) to a data vdev in a RAID-Z2 configuration. We name the pool `Media`.
And we hit Create, check the confirm box, and click Create Pool. It only takes a few seconds and we're back in business. Our new pool has one disk failing SMART tests, but we're going to tolerate that for now.
We recreate our datasets with default settings and begin the copy back from `TEMP` to `Media`. We'll use a slightly more sophisticated strategy for copying back. We run the [`copy.sh`](./copy.sh) script from a remote SSH session with `nohup`.
`ssh admin@192.168.1.10 nohup ~/copy.sh /mnt/TEMP/Media/ /mnt/Media/Media/`
And we wait. Painfully. We can check in occasionally with `tail -f ~/copy.tmp`, and we should get an email notification when the command completes.
Compare disk usage and file count between directories:
```
CHECKPATH="Sub/Directory";
echo "Source: /mnt/TEMP/$CHECKPATH";
echo -n " Disk usage: " && sudo du -s /mnt/TEMP/$CHECKPATH;
echo -n " File count: " && sudo find /mnt/TEMP/$CHECKPATH -type f | wc -l;
echo "Dest: /mnt/Media/$CHECKPATH";
echo -n " Disk usage: " && sudo du -s /mnt/Media/$CHECKPATH;
echo -n " File count: " && sudo find /mnt/Media/$CHECKPATH -type f | wc -l
```
Final output of `sudo zpool status Media TEMP`
```
pool: Media
state: DEGRADED
status: One or more devices are faulted in response to persistent errors.
Sufficient replicas exist for the pool to continue functioning in a
degraded state.
action: Replace the faulted device, or use 'zpool clear' to mark the device
repaired.
scan: resilvered 9.19M in 00:00:03 with 0 errors on Sun Mar 3 10:30:11 2024
config:
NAME STATE READ WRITE CKSUM
Media DEGRADED 0 0 0
raidz2-0 DEGRADED 0 0 0
a9df1c82-cc15-4971-8080-42056e6213dd ONLINE 0 0 0
8398ae95-9119-4dd6-ab3a-5c0af82f82f4 ONLINE 0 0 0
44ae3ae0-e8f9-4dbc-95ba-e64f63ab7460 ONLINE 0 0 0
eda6547f-9f25-4904-a5bd-8f8b4e36d859 ONLINE 0 0 0
05241f52-542c-4c8c-8f20-d34d2878c41a ONLINE 0 0 0
38cd7315-e269-4acc-a05b-e81362a9ea39 ONLINE 0 0 0
d50abb30-81fd-49c6-b22e-43fcee2022fe FAULTED 23 0 0 too many errors
90be0e9e-7af1-4930-9437-c36c24ea81c5 ONLINE 0 0 0
29b36c4c-8ad2-4dcb-9b56-08f5458817d2 ONLINE 0 0 0
d59a8281-618d-4bab-bd22-9f9f377baacf ONLINE 0 0 0
e0431a50-b5c6-459e-85bd-d648ec2c21d6 ONLINE 0 0 0
cd4808a8-a137-4121-a5ff-4181faadee64 ONLINE 0 0 0
errors: No known data errors
pool: TEMP
state: DEGRADED
status: One or more devices has experienced an error resulting in data
corruption. Applications may be affected.
action: Restore the file in question if possible. Otherwise restore the
entire pool from backup.
see: https://openzfs.github.io/openzfs-docs/msg/ZFS-8000-8A
scan: scrub repaired 3.17M in 14:29:34 with 4 errors on Sun Mar 3 14:29:35 2024
config:
NAME STATE READ WRITE CKSUM
TEMP DEGRADED 0 0 0
raidz2-0 DEGRADED 945K 0 0
dad98d96-3cbe-469e-b262-b8416dfc72ec ONLINE 0 0 0
0fabfe6b-5305-4711-921c-926110df24b7 OFFLINE 0 0 0
864e8c2d-0925-4018-b440-81807d3c5c9a ONLINE 11 0 0
d7b9a2ec-5f26-4649-a7fb-cb1ae953825e ONLINE 0 0 0
dd453900-d8c0-430d-bc1f-c022e62417ae OFFLINE 0 0 0
507666cd-91e2-4960-af02-b15899a22487 ONLINE 0 0 0
5eaf90b6-0ad1-4ec0-a232-d704c93dae9a ONLINE 0 0 0
cf9cc737-a704-4bea-bcee-db2cfe4490b7 ONLINE 0 0 0
50cc36be-001d-4e00-a0ca-e4b557bd6852 DEGRADED 366K 0 2 too many errors
142d45e8-4f30-4492-b01f-f22cba129fee DEGRADED 596K 0 6 too many errors
```
And we get the serials of each of our drives so as to ensure the degraded drives don't get pulled back into the pool:
```sh
for id in $(sudo zpool status TEMP | grep -E "[0-9a-fA-F]{8}\b-[0-9a-fA-F]{4}\b-[0-9a-fA-F]{4}\b-[0-9a-fA-F]{4}\b-[0-9a-fA-F]{12} (ONLINE|DEGRADED|FAULTED)" | tr -s ' ' | cut -d' ' -f 2); do
echo -n "$id -> ";
ls -l /dev/disk/by-partuuid |\
grep $id |\
tr -s ' ' |\
cut -d' ' -f 11 |\
cut -d'/' -f 3 |\
sed 's/^/\/dev\//' |\
xargs sudo smartctl -a |\
grep Serial |\
tr -s ' ' |\
cut -d' ' -f 3
done
```
Which gives us:
```
dad98d96-3cbe-469e-b262-b8416dfc72ec -> 2EKA92XX
864e8c2d-0925-4018-b440-81807d3c5c9a -> VJG2T4YX
d7b9a2ec-5f26-4649-a7fb-cb1ae953825e -> VKH3XR2X
507666cd-91e2-4960-af02-b15899a22487 -> VJG1NP9X
5eaf90b6-0ad1-4ec0-a232-d704c93dae9a -> VKH40L6X
cf9cc737-a704-4bea-bcee-db2cfe4490b7 -> VJG2808X
50cc36be-001d-4e00-a0ca-e4b557bd6852 -> VKHNH0GX # Degraded
142d45e8-4f30-4492-b01f-f22cba129fee -> VKJWPAEX # Degraded
```
Then we hit the big red button again: `Storage -> TEMP -> Export/Disconnect`
- [X] Destroy data on this pool?
- [X] Delete configuration of shares that used this pool?
- [X] Confirm Export/Disconnect?
Cry a little bit, then hit the final Export/Disconnect button.
And we'll also grab the partuuid-to-serial mappings for the Media pool:
```
a9df1c82-cc15-4971-8080-42056e6213dd -> VJGJVTZX
8398ae95-9119-4dd6-ab3a-5c0af82f82f4 -> VKGW5YGX
44ae3ae0-e8f9-4dbc-95ba-e64f63ab7460 -> VJG282NX
eda6547f-9f25-4904-a5bd-8f8b4e36d859 -> 2EKA903X
05241f52-542c-4c8c-8f20-d34d2878c41a -> VJGRRG9X
38cd7315-e269-4acc-a05b-e81362a9ea39 -> VKH3Y3XX
90be0e9e-7af1-4930-9437-c36c24ea81c5 -> VJGR6TNX
29b36c4c-8ad2-4dcb-9b56-08f5458817d2 -> VJGJAS1X
d59a8281-618d-4bab-bd22-9f9f377baacf -> 2EKATR2X
e0431a50-b5c6-459e-85bd-d648ec2c21d6 -> VJGK56KX
cd4808a8-a137-4121-a5ff-4181faadee64 -> VJGJUWNX
```
We shut down the server, then the NAS.
After shutting down the NAS, I realize that I am stupid. My one-liner to convert partuuid to serial only grabs devices with the ONLINE or DEGRADED status, not FAULTED. Whatever. We can fix that later.
Next, we're going to formalize a few of the datasets we had in the Media pool:
- `Media/Media/3D Printing` -> `Media/3DPrinting`
- `Media/Media/Audio` -> `Media/Audio`
- `Media/Media/Images` -> `Media/Images`
- `Media/Media/Text` -> `Media/Text`
- `Media/Media/Video` -> `Media/Video`
We're basically pulling every type of Media up one directory.
### Configuring ACLs for New Datasets
Our hosts are configured to connect as the user `smbuser` with the group `smbuser`.
So when we create a Unix ACL for a new dataset, we configure as follows:
1. Owner -> User: `smbuser` with box checked for Apply User
2. Owner -> Group: `smbuser` with box checked for Apply Group
3. Check box for Apply permissions recursively. (Confirm and continue).
4. Leave access mode matrix as default (755).
5. Save.
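On the CLI, those steps amount to roughly the following (a sketch; the dataset path is just an example):
```sh
# Rough CLI equivalent of the web-UI ACL steps above; path is an example.
sudo chown -R smbuser:smbuser /mnt/Media/3DPrinting
sudo chmod -R 755 /mnt/Media/3DPrinting
```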
### Riding the Update Train
*choo choo*
It's been a while since I updated TrueNAS. This install dates back to before TrueNAS existed, and has been upgraded once, from FreeNAS (BSD) to TrueNAS Scale (Linux).
Our installed version is TrueNAS-22.12.3. Latest stable is 23.10.2. Latest beta is 24.04.
According to the [upgrade paths](https://www.truenas.com/docs/truenasupgrades/#upgrade-paths) page, our upgrade path should go:
1. To `22.12.4.2`, the final patch of 22.12.
2. To `23.10.1.3`, the latest stable version of the Cobia update train.
From there, we have the choice to upgrade to the Dragonfish nightly build ([release notes](https://www.truenas.com/docs/scale/gettingstarted/scalereleasenotes/)).
### Setting up Rsync Backups
In order to connect to our backup NAS, we use the following parameters when configuring our Rsync tasks (we'll use the `HomeVideos` dataset for example):
- Path: `/mnt/Media/HomeVideos/` We use the trailing slash. I'm not sure why, but that's how it was, and so it shall stay.
- Rsync Mode: `SSH`
- Connect using: `SSH private key stored in user's home directory` We have an SSH private key in the home directory of the `root` user.
- Remote Host: `admin@192.168.1.11`
- Remote SSH Port: `22`
- Remote Path: `/mnt/Backup/Backup/Media/Media/Video/HomeVideos` We have the data organized by the old dataset layout. Some day I'll fix that. Surely...
- User: `root`
- Direction: `Push`
- Description: `Backup: HomeVideos`
- Schedule: `Daily (0 0 * * *) At 00:00 (12:00 AM)`
- Recursive: `[X]`
- Enabled: `[X]`
- Times: `[X]`
- Compress: `[X]`
- Archive: `[X]`
- Delete: `[X]`
- Delay Updates: `[X]`
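For reference, that task corresponds roughly to the following invocation (a sketch; TrueNAS assembles the real command itself):
```sh
# Approximation of the configured rsync task, not the literal command TrueNAS generates.
rsync --archive --recursive --times --compress --delete --delay-updates \
  -e "ssh -p 22" \
  /mnt/Media/HomeVideos/ \
  admin@192.168.1.11:/mnt/Backup/Backup/Media/Media/Video/HomeVideos
```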
### Reorganizing our Media shares
In moving our datasets,
- `/mnt/Media/Media/Video/Movies` to `/mnt/Media/Movies`,
- `/mnt/Media/Media/Video/Shows` to `/mnt/Media/Shows`, and
- `/mnt/Media/Media/Audio/Music` to `/mnt/Media/Music`
We will need to reorganize some stuff, and reconfigure anything dependent on those datasets. This includes:
- SMB shares (`Media` will need to be replaced with `Movies` and `Shows`)
- Snapshot tasks will need to be created for the datasets
- SMB client reconfiguration. Any host connecting to the old `Media` share expects a certain directory structure below it. We'll need to cope with that.
Below I document all uses of the `/mnt/nas/media` directory in absolute host:container mappings:
- Autopirate:
- Radarr: `/mnt/nas/media/Video/Movies:/movies`
- Sonarr: `/mnt/nas/media/Video/Shows:/shows`
- Bazarr: `/mnt/nas/media/Video/Movies:/movies`, `/mnt/nas/media/Video/Shows:/tv`
- Sabnzbd: `/mnt/nas/media/Video/Movies:/movies`, `/mnt/nas/media/Video/Shows:/shows`, `/mnt/nas/media/Audio/Music:/music`
- Tdarr: `/mnt/nas/media/Video/Movies:/movies`, `/mnt/nas/media/Video/Shows:/shows`
- Tdarr-node: `/mnt/nas/media/Video/Movies:/movies`, `/mnt/nas/media/Video/Shows:/shows`
- Jellyfin:
- Jellyfin: `/mnt/nas/media/Video/Movies:/data/movies`, `/mnt/nas/media/Video/Shows:/data/tvshows`
- Plex:
- Plex: `/mnt/nas/media/Video/Movies:/movies`, `/mnt/nas/media/Video/Shows:/shows`, `/mnt/nas/media/Audio/Music:/music`
We're gonna have to refactor all of these.
Most use `MEDIA_DIR=/mnt/nas/media` in their `.env` file as the baseline.
We'll need to replace that with `MOVIES_DIR=/mnt/nas/movies`, `SHOWS_DIR=/mnt/nas/shows`, and `MUSIC_DIR=/mnt/nas/music`.
Then we'll need to find the lines in each compose file which look like `${MEDIA_DIR}/Video/Movies`, `${MEDIA_DIR}/Video/Shows`, and `${MEDIA_DIR}/Audio/Music`, and replace them with `${MOVIES_DIR}`, `${SHOWS_DIR}`, and `${MUSIC_DIR}` respectively.
None of the container-side mappings should need to change. A rough sketch of the substitution follows.
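This sketch uses GNU sed; the stack directories and compose filenames are assumptions, so review the diffs before recreating anything:
```bash
for dir in autopirate jellyfin plex; do
  # Swap the old baseline variable for the three new ones.
  sed -i 's|^MEDIA_DIR=.*|MOVIES_DIR=/mnt/nas/movies\nSHOWS_DIR=/mnt/nas/shows\nMUSIC_DIR=/mnt/nas/music|' \
    ~/homelab/$dir/.env
  # Point the volume mappings at the new variables.
  sed -i \
    -e 's|${MEDIA_DIR}/Video/Movies|${MOVIES_DIR}|g' \
    -e 's|${MEDIA_DIR}/Video/Shows|${SHOWS_DIR}|g' \
    -e 's|${MEDIA_DIR}/Audio/Music|${MUSIC_DIR}|g' \
    ~/homelab/$dir/docker-compose.yml
done
```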
### Replacing Yet Another Disk
The drive hosting part-uuid `d50abb30-81fd-49c6-b22e-43fcee2022fe` failed 7 SMART short tests in a row while we were moving our data around. Great.
So we get the device from the part-uuid (we already knew it was `/dev/sdx` from the email notifications I was getting spammed with during the move, but let's follow the exercise) with `ls -l /dev/disk/by-partuuid | grep d50abb30-81fd-49c6-b22e-43fcee2022fe`, which shows the symlink pointing at `../../sdx2`. We open the web UI, navigate to the Manage Disks panel of the Media pool, find the bad drive, note its serial number, and hit Offline. Once that's done, we check [diskshelfmap](./DISKSHELFMAP.md) to see where that drive is located. We physically remove the caddy from the shelf, then the drive from the caddy. We throw the new drive in, note its serial number, and document the swap in diskshelfmap. We wait a bit for the drive to be recognized, run a quick sanity check with `smartctl -a /dev/sdx` to make sure the SMART info looks good and the serial number matches, then kick off the replacement and resilver with `zpool replace Media d50abb30-81fd-49c6-b22e-43fcee2022fe /dev/sdx`.
Now we wait like 2 days for the resilver to finish and we hope no other drives fail in the meantime.

View File

@ -0,0 +1,109 @@
# Summary
Barbarian is a TrueNAS host built from old gaming PC hand-me-downs.
# Hardware
| Part | Make & Model | Notes | Link |
|:----:|:------------:|:-----:|:----:|
| Case | RSV-L4500U | | [Rosewill.com](https://www.rosewill.com/rosewill-rsv-l4500u-black/p/9SIA072GJ92805) |
| PSU | | | |
# Pools
I have 2 pools, one for "Media" and one for everything else. All disks are 8 TB HGST/Hitachi drives with a sector size of 4096B. All pools use vdevs of 3 drives in RAIDZ1.
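For illustration, a 3-wide RAIDZ1 vdev is created like so. This is a sketch only, not a command to run against the existing pools, and the device names are placeholders:
```bash
# ashift=12 matches the drives' 4096B sector size.
zpool create -o ashift=12 Media raidz1 /dev/da0 /dev/da1 /dev/da2
```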
## Replace a failing disk
1. Get the serial number of the drive. If you can see the part-uuid in `zpool status` (e.g. `44ae3ae0-e8f9-4dbc-95ba-e64f63ab7460`), you can get the serial number via:
```bash
id=44ae3ae0-e8f9-4dbc-95ba-e64f63ab7460
# Resolve the symlink target (e.g. ../../sda2) into a /dev/ device path.
label=$(ls -l /dev/disk/by-partuuid | grep $id | tr -s ' ' | cut -d' ' -f 11 | cut -d'/' -f 3 | sed 's/^/\/dev\//')
serial=$(sudo smartctl -a $label | grep Serial | tr -s ' ' | cut -d' ' -f 3)
echo "$id -> $label -> $serial"
```
2. Offline and remove the failing disk. Run `zpool offline [pool] [disk]`, then refer to the [Physical Disk Locations](#physical-disk-locations-ds4243) chart to determine which shelf slot the disk is in, and remove it.
3. Insert the new disk and wait 60 seconds for it to be detected.
4. Wipe the new disk. Find the new disk on the disks page, expand it, and run a quick wipe.
5. Replace the removed disk with the new disk. On the devices page for the affected pool, click the removed drive. Then hit Replace from the Disk Info card, find the new drive in the drop-down, and begin the replacement operation. (A CLI sketch of the offline-and-replace flow follows this list.)
   - You can check the sector size of the new disk with `smartctl -a /dev/<disknum> | grep block`. If `Logical block size` is `512 bytes` (or anything other than `4096 bytes`), the disk needs to be reformatted as described under [Convert a 512B-sector disk](#convert-a-512b-sector-disk-to-4096b-sectors). This will take several hours.
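The CLI equivalent of the offline-and-replace flow looks roughly like this (pool name, part-uuid, and device are examples):
```bash
zpool offline Media 44ae3ae0-e8f9-4dbc-95ba-e64f63ab7460    # before pulling the disk
zpool replace Media 44ae3ae0-e8f9-4dbc-95ba-e64f63ab7460 /dev/da4    # after inserting the new one
zpool status Media    # watch the resilver progress
```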
## Convert a 512B-sector disk to 4096B sectors
1. Get the disknum (like `da4`), either in the web UI or with the `~/disklist.pl` script, for the disk that needs to be replaced.
2. Check the current sector size with `smartctl -a /dev/<disknum> | grep block`. If `Logical block size` is `512 bytes` (or anything other than `4096 bytes`), then the disk needs to be reformatted.
3. Reformat the disk(s) with the `sg_format` command. Use the following flags: `--size=4096 --format --fmtpinfo=0`, followed by the disk device (e.g. `/dev/da15`). Use the `nohup` utility and the `&` operator to run the command in the background. An example one-liner for three disks (`da15`, `da16`, `da17`):
```bash
nohup sg_format --size=4096 --format --fmtpinfo=0 /dev/da15 & \
nohup sg_format --size=4096 --format --fmtpinfo=0 /dev/da16 & \
nohup sg_format --size=4096 --format --fmtpinfo=0 /dev/da17 &
```
Alternatively, try this for the first time:
```bash
for disk in da15 da16 da17; do mkdir -p ~/.formatting/$disk && cd ~/.formatting/$disk && nohup sg_format --size=4096 --format --fmtpinfo=0 /dev/$disk & done
```
4. Close the terminal. Then log back in and run `ps -aux | grep sg_format` to confirm all processes are running. Check SMART status for the disks with `for disk in da15 da16 da17; do smartctl -a /dev/$disk; done` (where `da15 da16 da17` is your list of disks).
5. Wait 12-16 hours (for an 8 TB disk).
6. Remove and re-insert the disk.
## Perform a large copy operation in the background
0. `cd ~` for consistent placement of `nohup.out`
1. Use `nohup sh -c 'cp -rv /mnt/[from_pool]/[from_dataset]/ /mnt/[to_pool]/[to_dataset] && echo "" | mail -s "Copy /mnt/[from_pool]/[from_dataset]/ to /mnt/[to_pool]/[to_dataset] complete" root' &` (pay attention to trailing slashes) to run the copy in the background and send an email when the copy is complete. Wrapping the whole pipeline in `sh -c` keeps the mail step attached to the `nohup`, so the job persists through closing the terminal and completely closing the SSH connection.
2. Use `cmdwatch du -h ~/nohup.out` to watch the size of the log file increase (to confirm it is still copying)
3. Use `tail -f ~/nohup.out` to follow the actual logs. The original command writes to this file in batches when it is in the background, so don't expect it to be as smooth as running the command in the foreground.
## Perform a copy operation in the foreground with progress monitoring
Use `rsync -ah --progress $SOURCE $DESTINATION`
Note that if the source is something like `/first/path/to/folder1/` and you want to copy it to `/second/path/to/folder1/`, make sure to fully specify the destination path (`DESTINATION=/second/path/to/folder1/`). Where something like `cp` or `mv` would create the source folder in the destination folder, Rsync is more literal.
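For example, these two commands leave the destination in the same state (note where the trailing slashes are):
```bash
rsync -ah --progress /first/path/to/folder1/ /second/path/to/folder1/
rsync -ah --progress /first/path/to/folder1 /second/path/to/
```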
# Services
## S.M.A.R.T.
All values default.
| Parameter | Value |
|:---------:|:-----:|
| Start Automatically | Yes |
| Check interval | 30 minutes |
| Difference | 0 &deg;C |
| Informational | 0 &deg;C |
| Critical | 0 &deg;C |
## SMB
| Parameter | Value |
|:---------:|:-----:|
| Start Automatically | Yes |
| NetBIOS Name | joey-nas |
| NetBIOS Alias | - |
| Workgroup | WORKGROUP |
| Description | FreeNAS Server |
| Enable SMB1 Support | No |
| NTLMv1 Auth | No |
| UNIX Charset | UTF-8 |
| Log Level | Minimum |
| Use Syslog Only | No |
| Local Master | Yes |
| Enable Apple SMB2/3 Protocol Extensions | No |
| Administrators Group | - |
| Guest Account | nobody |
| File Mask | - |
| Directory Mask | - |
| Bind IP Addresses | 192.168.1.10,192.168.50.1 |
| Auxiliary Parameters | - |
## SSH
| Parameter | Value |
|:---------:|:-----:|
| Start Automatically | Yes |
| TCP Port | 22 |
| Log in as Root with Password | Yes |
| Allow Password Authentication | Yes |
| Allow Kerberos Authentication | No |
| Allow TCP Port Forwarding | No |
# Users, Groups, Permissions
TODO, not yet designed.

View File

@ -0,0 +1,35 @@
#!/bin/sh
FROM_DIR=$1 # trailing slash
TO_DIR=$2 # trailing slash
LOG_FILE=/mnt/Tank/home/admin/copy.log
rsync -avhW "$FROM_DIR" "$TO_DIR" > "$LOG_FILE"
# If filesize is less than 23 million bytes (21.93 MiB)
# Gives generous headroom for Gmail max attachment size
if [ "$(ls -la "$LOG_FILE" | awk '{print $5}')" -le 23000000 ]; then
( echo "Subject: Copy $FROM_DIR to $TO_DIR operation complete."
echo "Mime-Version: 1.0"
echo "Content-Type: multipart/mixed; boundary=\"d29a0c638b540b23e9a29a3a9aebc900aeeb6a82\""
echo "Content-Transfer-Encoding: 7bit"
echo ""
echo "--d29a0c638b540b23e9a29a3a9aebc900aeeb6a82"
echo "Content-Type: text/html; charset=\"UTF-8\""
echo "Content-Transfer-Encoding: 7bit"
echo "Content-Disposition: inline"
echo ""
echo "Copy $FROM_DIR to $TO_DIR complete. See log at $LOG_FILE for details about what was copied."
echo ""
echo "--d29a0c638b540b23e9a29a3a9aebc900aeeb6a82"
echo "Content-Type: text/plain"
echo "Content-Transfer-Encoding: base64"
echo "Content-Disposition: attachment; filename=\"$(basename $LOG_FILE)\""
echo ""
base64 "$LOG_FILE"
echo "--d29a0c638b540b23e9a29a3a9aebc900aeeb6a82--"
echo ""
echo "--d29a0c638b540b23e9a29a3a9aebc900aeeb6a82--"
) | sendmail root
else
echo "Filesize too large to attach. See log file at $LOG_FILE for details." | mail -s "Copy $FROM_DIR to $TO_DIR operation complete." root
fi

View File

@ -0,0 +1,30 @@
# ./inxi -CDGmMNPS --dmidecode
System: Host: barbarian Kernel: 6.6.32-production+truenas x86_64 bits: 64 Console: tty pts/0
Distro: Debian GNU/Linux 12 (bookworm)
Machine: Type: Desktop Mobo: Gigabyte model: X99-SLI-CF v: x.x serial: N/A UEFI: American Megatrends v: F24a rev: 5.6
date: 01/11/2018
Memory: RAM: total: 62.65 GiB used: 2.48 GiB (4.0%)
Array-1: capacity: 512 GiB note: check slots: 8 EC: None
Device-1: DIMM_A1 size: 8 GiB speed: 2133 MT/s
Device-2: DIMM_A2 size: 8 GiB speed: 2133 MT/s
Device-3: DIMM_B1 size: 8 GiB speed: 2133 MT/s
Device-4: DIMM_B2 size: 8 GiB speed: 2133 MT/s
Device-5: DIMM_C1 size: 8 GiB speed: 2133 MT/s
Device-6: DIMM_C2 size: 8 GiB speed: 2133 MT/s
Device-7: DIMM_D1 size: 8 GiB speed: 2133 MT/s
Device-8: DIMM_D2 size: 8 GiB speed: 2133 MT/s
CPU: Info: 6-Core model: Intel Core i7-5930K bits: 64 type: MT MCP cache: L2: 15 MiB
Speed: 1200 MHz min/max: 1200/3700 MHz Core speeds (MHz): 1: 1200 2: 1200 3: 1200 4: 1200 5: 1200 6: 1200 7: 2191
8: 1200 9: 1200 10: 1200 11: 1200 12: 1200
Graphics: Device-1: NVIDIA GK208B [GeForce GT 710] driver: N/A
Display: server: No display server data found. Headless machine? tty: 154x70
Message: Unable to show advanced data. Required tool glxinfo missing.
Network: Device-1: Intel Ethernet I218-V driver: e1000e
Drives: Local Storage: total: raw: 74.53 GiB usable: 146.26 GiB used: 3.95 GiB (2.7%)
ID-1: /dev/sda vendor: Intel model: SSDSCKGW080A4 size: 74.53 GiB
Partition: ID-1: / size: 30.12 GiB used: 163.8 MiB (0.5%) fs: zfs logical: freenas-boot/ROOT/24.04.2
ID-2: /home size: 29.96 GiB used: 128 KiB (0.0%) fs: zfs logical: freenas-boot/ROOT/24.04.2/home
ID-3: /opt size: 30.03 GiB used: 72.1 MiB (0.2%) fs: zfs logical: freenas-boot/ROOT/24.04.2/opt
ID-4: /usr size: 31.85 GiB used: 1.89 GiB (5.9%) fs: zfs logical: freenas-boot/ROOT/24.04.2/usr
ID-5: /var size: 29.98 GiB used: 19.9 MiB (0.1%) fs: zfs logical: freenas-boot/ROOT/24.04.2/var
ID-6: /var/log size: 30.04 GiB used: 85.9 MiB (0.3%) fs: zfs logical: freenas-boot/ROOT/24.04.2/var/log

View File

@ -0,0 +1 @@
DOCKER_DATA=/mnt/volume_sfo3_01/5etools

View File

@ -0,0 +1,22 @@
services:
5etools:
container_name: 5etools
image: jafner/5etools-docker:latest
restart: "no"
volumes:
- $DOCKER_DATA/htdocs:/usr/local/apache2/htdocs
environment:
- IMG=TRUE
- PGID=1003
- PUID=1003
networks:
- web
labels:
- traefik.http.routers.5etools.rule=Host(`5e.jafner.tools`)
- traefik.http.routers.5etools.tls.certresolver=lets-encrypt
- traefik.http.routers.5etools.tls.options=tls12@file
- traefik.http.routers.5etools.middlewares=securityheaders@file
networks:
web:
external: true

View File

@ -0,0 +1 @@
DOCKER_DATA=/home/admin/data/gitea

View File

@ -0,0 +1,82 @@
# `app.ini` Snippets
The main Gitea config file is located at `~/data/gitea/gitea/gitea/conf/app.ini`.
Configure the connection to the postgres DB container.
```ini
[database]
PATH = /data/gitea/gitea.db
DB_TYPE = postgres
HOST = postgres:5432
NAME = gitea
USER = gitea
PASSWD = [Located at postgres_secrets.env]
LOG_SQL = false
SCHEMA =
SSL_MODE = disable
```
Disable OpenID as a login option.
```ini
[openid]
ENABLE_OPENID_SIGNIN = false
ENABLE_OPENID_SIGNUP = false
```
Allow migrating from specific domains.
```ini
[migrations]
ALLOWED_DOMAINS = gitlab.jafner.net, *.github.com, github.com
```
Configure SMTP email.
```ini
[mailer]
ENABLED = true
FROM = Gitea
PROTOCOL = smtp+starttls
SMTP_ADDR = smtp.protonmail.ch
SMTP_PORT = 587
USER = noreply@jafner.net
PASSWD = `****************`
```
## Apply changes
Just restart the container.
`cd ~/homelab/druid/config/gitea && docker compose up -d --force-recreate`
# Re-register Gitea Runners
To force the runners to re-register (to apply updated labels, for example).
1. Stop and remove the containers. Run `docker ps -aq --filter name="gitea_runner-*" | xargs docker stop | xargs docker rm`.
2. Delete the `.runner` files for each runner. Run `find ~/data/gitea/ -name ".runner" -delete`.
3. (Optional) Update runner config. Modify the `config.yaml` file as needed. [Official example config](https://gitea.com/gitea/act_runner/src/branch/main/internal/pkg/config/config.example.yaml).
4. Bring the runners back up. Run `docker compose up -d` from the gitea directory.
# Delete Registered Runners
Apparently a misconfigured Docker-in-Docker runner may sometimes retry registering over and over until the heat death of the universe. In that case you end up with many "ghost" runners. In my case, 27,619. To resolve, you can either step through each one and click "edit", then "delete", then "confirm", or you can just use the database (a pre-delete sanity check is sketched after these steps).
1. `docker exec -it gitea_postgres psql --username "gitea"` To open a terminal inside the container and open a CLI session to the database.
2. `\c gitea` To select the 'gitea' database.
3. `DELETE FROM action_runner WHERE id NOT IN (50, 66);` To delete all entries except those with the IDs I wanted to keep.
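Before running the delete, it's worth checking how many rows are about to go (same container and database as above; the kept IDs are from step 3):
```bash
docker exec -it gitea_postgres psql --username gitea -d gitea \
  -c "SELECT COUNT(*) FROM action_runner WHERE id NOT IN (50, 66);"
```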
# Disable native auth
We don't want to use Gitea's native auth. We want Keycloak to handle all our authentication. So we place a template override in the correct directory, which Gitea picks up on startup to generate the signin page.
The file [`signin_inner.tmpl`](signin_inner.tmpl) must be placed into `/data/gitea/templates/user/auth/` *inside the container*. In our case, that means `~/data/gitea/gitea/gitea/templates/user/auth/` on the host system.
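A minimal sketch of placing the file and applying it (host paths per the bind mount; adjust to your layout):
```bash
mkdir -p ~/data/gitea/gitea/gitea/templates/user/auth/
cp signin_inner.tmpl ~/data/gitea/gitea/gitea/templates/user/auth/
cd ~/homelab/druid/config/gitea && docker compose up -d --force-recreate  # Gitea reads templates at startup
```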
For this to work properly, we use the following `app.ini` snippets:
```ini
[service]
DISABLE_REGISTRATION = true
ALLOW_ONLY_EXTERNAL_REGISTRATION = true
[openid]
ENABLE_OPENID_SIGNIN = false
ENABLE_OPENID_SIGNUP = false
[oauth2_client]
ENABLE_AUTO_REGISTRATION = true
ACCOUNT_LINKING = disabled
```

View File

@ -0,0 +1,60 @@
services:
gitea:
image: gitea/gitea:latest
container_name: gitea_gitea
env_file:
- path: ./gitea.env
required: true
restart: always
networks:
- web
- gitea
volumes:
- $DOCKER_DATA/gitea:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "2225:22"
labels:
- traefik.http.routers.gitea.rule=Host(`gitea.jafner.tools`)
- traefik.http.routers.gitea.tls.certresolver=lets-encrypt
- traefik.http.routers.gitea.tls.options=tls12@file
- traefik.http.routers.gitea.middlewares=securityheaders@file
- traefik.http.routers.gitea.service=gitea
- traefik.http.services.gitea.loadbalancer.server.port=3000
postgres:
image: postgres:13
container_name: gitea_postgres
networks:
- gitea
env_file:
- path: ./postgres_secrets.env
required: false
volumes:
- postgres_data:/var/lib/postgresql/data
runner-ubuntu:
image: gitea/act_runner:latest
container_name: gitea_runner-ubuntu
depends_on:
- gitea
env_file:
- path: ./runner-ubuntu.env
required: true
- path: ./runner_secrets.env
required: false
networks:
- gitea
volumes:
- ./runner_ubuntu_config.yaml:/config.yaml
- $DOCKER_DATA/runner-ubuntu:/data
- /var/run/docker.sock:/var/run/docker.sock
networks:
web:
external: true
gitea:
volumes:
postgres_data:

View File

@ -0,0 +1,2 @@
USER_UID=1002
USER_GID=1002

View File

@ -0,0 +1,4 @@
CONFIG_FILE=/config.yaml
GITEA_INSTANCE_URL=https://gitea.jafner.tools
GITEA_RUNNER_NAME=druid2
GITEA_RUNNER_LABELS=dind,druid

View File

@ -0,0 +1,4 @@
CONFIG_FILE=/config.yaml
GITEA_INSTANCE_URL=https://gitea.jafner.tools
GITEA_RUNNER_NAME=druid1
GITEA_RUNNER_LABELS=ubuntu-latest:docker://node:16-bullseye,ubuntu-22.04:docker://node:16-bullseye,ubuntu-20.04:docker://node:16-bullseye,ubuntu-18.04:docker://node:16-buster,druid

View File

@ -0,0 +1,89 @@
# Example configuration file, it's safe to copy this as the default config file without any modification.
# You don't have to copy this file to your instance,
# just run `./act_runner generate-config > config.yaml` to generate a config file.
log:
# The level of logging, can be trace, debug, info, warn, error, fatal
level: info
runner:
# Where to store the registration result.
file: .runner
  # How many tasks to execute concurrently.
capacity: 1
# Extra environment variables to run jobs.
envs:
A_TEST_ENV_NAME_1: a_test_env_value_1
A_TEST_ENV_NAME_2: a_test_env_value_2
# Extra environment variables to run jobs from a file.
# It will be ignored if it's empty or the file doesn't exist.
env_file: .env
# The timeout for a job to be finished.
  # Please note that the Gitea instance also has a timeout (3h by default) for the job.
  # So the job could be stopped by the Gitea instance if its timeout is shorter than this.
timeout: 3h
# Whether skip verifying the TLS certificate of the Gitea instance.
insecure: false
# The timeout for fetching the job from the Gitea instance.
fetch_timeout: 5s
# The interval for fetching the job from the Gitea instance.
fetch_interval: 2s
# The labels of a runner are used to determine which jobs the runner can run, and how to run them.
# Like: ["macos-arm64:host", "ubuntu-latest:docker://node:16-bullseye", "ubuntu-22.04:docker://node:16-bullseye"]
# If it's empty when registering, it will ask for inputting labels.
  # If it's empty when executing `daemon`, it will use the labels in the `.runner` file.
labels: []
cache:
# Enable cache server to use actions/cache.
enabled: true
# The directory to store the cache data.
# If it's empty, the cache data will be stored in $HOME/.cache/actcache.
dir: ""
# The host of the cache server.
# It's not for the address to listen, but the address to connect from job containers.
# So 0.0.0.0 is a bad choice, leave it empty to detect automatically.
host: ""
# The port of the cache server.
# 0 means to use a random available port.
port: 0
# The external cache server URL. Valid only when enable is true.
# If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself.
# The URL should generally end with "/".
external_server: ""
container:
# Specifies the network to which the container will connect.
# Could be host, bridge or the name of a custom network.
# If it's empty, act_runner will create a network automatically.
network: ""
# Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker).
privileged: false
# And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway).
options:
# The parent directory of a job's working directory.
# If it's empty, /workspace will be used.
workdir_parent:
# Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob
# You can specify multiple volumes. If the sequence is empty, no volumes can be mounted.
# For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to:
# valid_volumes:
# - data
# - /src/*.json
# If you want to allow any volume, please use the following configuration:
# valid_volumes:
# - '**'
valid_volumes: []
# overrides the docker client host with the specified one.
# If it's empty, act_runner will find an available docker host automatically.
# If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers.
# If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work.
docker_host: ""
# Pull docker image(s) even if already present
force_pull: false
host:
# The parent directory of a job's working directory.
# If it's empty, $HOME/.cache/act/ will be used.
workdir_parent:

View File

@ -0,0 +1,20 @@
{{if or (not .LinkAccountMode) (and .LinkAccountMode .LinkAccountModeSignIn)}}
{{template "base/alert" .}}
{{end}}
<div class="ui attached segment">
{{if .OAuth2Providers}}
<div id="oauth2-login-navigator" class="gt-py-2">
<div class="gt-df gt-fc gt-jc">
<div id="oauth2-login-navigator-inner" class="gt-df gt-fc gt-fw gt-ac gt-gap-3">
{{range $provider := .OAuth2Providers}}
<a class="{{$provider.Name}} ui button gt-df gt-ac gt-jc gt-py-3 gt-w-full oauth-login-link" href="{{AppSubUrl}}/user/oauth2/{{$provider.DisplayName}}">
{{$provider.IconHTML 28}}
{{ctx.Locale.Tr "sign_in_with_provider" $provider.DisplayName}}
</a>
{{end}}
</div>
</div>
</div>
{{end}}
</form>
</div>

View File

@ -0,0 +1,18 @@
services:
exporter-docker:
image: prometheusnet/docker_exporter:latest
container_name: monitoring_exporter-docker
restart: "no"
ports:
- 50417:9417
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
exporter-ping:
image: czerwonk/ping_exporter:latest
container_name: monitoring_exporter-ping
restart: "no"
ports:
- 50418:9427
volumes:
- ./exporter-ping.yml:/config/config.yml
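# Once the stack is up, a quick way to confirm both exporters serve metrics
# (host ports per the mappings above):
#   curl -s http://localhost:50417/metrics | head
#   curl -s http://localhost:50418/metrics | head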

View File

@ -0,0 +1,26 @@
targets:
- 8.8.8.8 # google primary
- 1.1.1.1 # cloudflare primary
- 9.9.9.9 # quad9 primary
- jafner.net # homelab public ip
- jafner.tools # droplet public ip
- tukw-dsl-gw76.tukw.qwest.net # Owned by CENTURYLINK-US-LEGACY-QWEST (ASN: 209) (https://www.findip-address.com/63.231.10.76)
- tukw-agw1.inet.qwest.net # Owned by CENTURYLINK-US-LEGACY-QWEST (ASN: 209) (https://www.findip-address.com/63.226.198.89)
- sea-edge-15.inet.qwest.net # Owned by CENTURYLINK-US-LEGACY-QWEST (ASN: 209) (https://www.findip-address.com/205.171.0.59)
- 4.68.38.173 # Owned by LEVEL3 (ASN: 3356) (https://www.findip-address.com/4.68.38.173)
- 4.69.219.210 # Owned by LEVEL3 (ASN: 3356) (https://www.findip-address.com/4.69.219.210)
- 4.30.140.62 # Owned by LEVEL3 (ASN: 3356) (https://www.findip-address.com/4.30.140.62)
- 172.71.144.3 # Owned by CLOUDFLARENET (ASN: 13335) (https://www.findip-address.com/172.71.144.3)
dns:
refresh: 1m
nameserver: 1.1.1.1
ping:
interval: 5s
timeout: 3s
history-size: 5
payload-size: 32
options:
disableIPv6: true

View File

@ -0,0 +1,12 @@
version: '3'
services:
socks5:
container_name: socks5-proxy_socks5-proxy
restart: always
image: serjs/go-socks5-proxy
environment:
PROXY_USER: 'fighter'
PROXY_PASSWORD: ${socks5_PROXY_PASSWORD}
PROXY_PORT: 1080
ports:
- "1080:1080"