diff --git a/index.html b/index.html index c0edcc0d..d0dd6625 100644 --- a/index.html +++ b/index.html @@ -4416,6 +4416,7 @@
Notes I made on various topics.
The wiki is built with MkDocs and GitHub Pages. It supports inline PlantUML diagrams.
Inspired by wiki.nikitavoloboev.xyz & The Blue Book.
diff --git a/search/search_index.json b/search/search_index.json index 0fe96085..844fa37e 100644 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Introduction","text":"The wiki is built with MkDocs and GitHub Pages. It supports inline PlantUML diagrams.
Inspired by wiki.nikitavoloboev.xyz & The Blue Book.
"},{"location":"#using-the-wiki","title":"Using the wiki","text":"You can quickly search the contents of this wiki above or you can explore the tree view to the left.
"},{"location":"Applications/wallabag/","title":"Wallabag","text":""},{"location":"Applications/wallabag/#run-a-console-command-in-the-container","title":"Run a console command in the container","text":"docker exec -it <containerName> /var/www/wallabag/bin/console <command> --env=prod\n
"},{"location":"Applications/wallabag/#list-commands","title":"List commands","text":"docker exec -it wallabag /var/www/wallabag/bin/console list --env=prod\n
"},{"location":"Applications/wallabag/#get-help-for-a-command","title":"Get help for a command","text":"docker exec -it wallabag /var/www/wallabag/bin/console help <command> --env=prod\n
"},{"location":"Applications/wallabag/#initialize-the-database","title":"Initialize the database","text":"docker exec -it wallabag /var/www/wallabag/bin/console wallabag:install --env=prod --no-interaction\n
"},{"location":"Applications/wallabag/#migrate-database","title":"Migrate database","text":"docker exec -it wallabag /var/www/wallabag/bin/console doctrine:migrations:migrate --env=prod --no-interaction\n
"},{"location":"Applications/wallabag/#list-users","title":"List users","text":"docker exec -it wallabag /var/www/wallabag/bin/console wallabag:user:list --env=prod\n
"},{"location":"Applications/wallabag/#create-a-new-user","title":"Create a new user","text":"docker exec -it wallabag /var/www/wallabag/bin/console fos:user:create --env=prod\n
"},{"location":"Applications/wallabag/#make-a-user-super-admin","title":"Make a user super admin","text":"docker exec -it wallabag /var/www/wallabag/bin/console fos:user:promote <user> --super --env=prod\n
"},{"location":"Applications/wallabag/#demote-and-deactivate-the-initial-wallabag-user","title":"Demote and deactivate the initial wallabag user","text":"docker exec -it wallabag /var/www/wallabag/bin/console fos:user:demote wallabag --super --env=prod\ndocker exec -it wallabag /var/www/wallabag/bin/console fos:user:deactivate wallabag --env=prod\n
"},{"location":"Applications/PiHole/add-unbound/","title":"Add Unbound as a recursive DNS Server to the PiHole setup","text":""},{"location":"Applications/PiHole/add-unbound/#why-would-you-want-this","title":"Why would you want this?","text":"sudo apt install unbound\n
"},{"location":"Applications/PiHole/add-unbound/#configuration","title":"Configuration","text":"Configure unbound with:
cat << EOF > /etc/unbound/unbound.conf.d/pi-hole.conf\nserver:\n # If no logfile is specified, syslog is used\n # logfile: \"/var/log/unbound/unbound.log\"\n verbosity: 0\n\n # view more statistics\n extended-statistics: yes\n\n interface: 127.0.0.1\n port: 5335\n do-ip4: yes\n do-udp: yes\n do-tcp: yes\n\n # May be set to yes if you have IPv6 connectivity\n do-ip6: yes\n\n # You want to leave this to no unless you have *native* IPv6. With 6to4 and\n # Teredo tunnels your web browser should favor IPv4 for the same reasons\n prefer-ip6: no\n\n # Use this only when you downloaded the list of primary root servers!\n # If you use the default dns-root-data package, unbound will find it automatically\n #root-hints: \"/var/lib/unbound/root.hints\"\n\n # Trust glue only if it is within the server's authority\n harden-glue: yes\n\n # Require DNSSEC data for trust-anchored zones, if such data is absent, the zone becomes BOGUS\n harden-dnssec-stripped: yes\n\n # Don't use capitalization randomization as it is known to cause DNSSEC issues sometimes\n # see https://discourse.pi-hole.net/t/unbound-stubby-or-dnscrypt-proxy/9378 for further details\n use-caps-for-id: no\n\n # Reduce EDNS reassembly buffer size.\n # IP fragmentation is unreliable on the Internet today, and can cause\n # transmission failures when large DNS messages are sent via UDP. Even\n # when fragmentation does work, it may not be secure; it is theoretically\n # possible to spoof parts of a fragmented DNS message, without easy\n # detection at the receiving end. Recently, there was an excellent study\n # >>> Defragmenting DNS - Determining the optimal maximum UDP response size for DNS <<<\n # by Axel Koolhaas, and Tjeerd Slokker (https://indico.dns-oarc.net/event/36/contributions/776/)\n # in collaboration with NLnet Labs explored DNS using real world data from\n # the RIPE Atlas probes and the researchers suggested different values for\n # IPv4 and IPv6 and in different scenarios. They advise that servers should\n # be configured to limit DNS messages sent over UDP to a size that will not\n # trigger fragmentation on typical network links. DNS servers can switch\n # from UDP to TCP when a DNS response is too big to fit in this limited\n # buffer size. This value has also been suggested in DNS Flag Day 2020.\n edns-buffer-size: 1232\n\n # Perform prefetching of close to expired message cache entries\n # This only applies to domains that have been frequently queried\n prefetch: yes\n\n # One thread should be sufficient, can be increased on beefy machines. In reality for most users running on small networks or on a single machine, it should be unnecessary to seek performance enhancement by increasing num-threads above 1.\n num-threads: 1\n\n # Ensure kernel buffer is large enough to not lose messages in traffic spikes\n so-rcvbuf: 1m\n\n # Ensure privacy of local IP ranges\n # Needs to be commented out if you have public DNS records (e.g. Cloudflare) resolving to\n # your local IP. Those records will otherwise be unresolvable.\n private-address: 192.168.0.0/16\n private-address: 169.254.0.0/16\n private-address: 172.16.0.0/12\n private-address: 10.0.0.0/8\n private-address: fd00::/8\n private-address: fe80::/10\nEOF\n
Signal PiHole's embedded dnsmasq (pihole-FTL) to use the same EDNS buffer size limit
cat << EOF > /etc/dnsmasq.d/99-edns.conf\nedns-packet-max=1232\nEOF\n
"},{"location":"Applications/PiHole/add-unbound/#restart-unbound","title":"Restart unbound","text":"sudo systemctl restart unbound\n
"},{"location":"Applications/PiHole/add-unbound/#test-unbound","title":"Test unbound","text":""},{"location":"Applications/PiHole/add-unbound/#query","title":"Query","text":"dig google.com @127.0.0.1 -p 5335\n
"},{"location":"Applications/PiHole/add-unbound/#dnssec","title":"DNSSec","text":"Get Servfail
dig sigfail.verteiltesysteme.net @127.0.0.1 -p 5335\n
This domain is correctly signed, so the query should return NOERROR
dig sigok.verteiltesysteme.net @127.0.0.1 -p 5335\n
"},{"location":"Applications/PiHole/add-unbound/#configure-pihole","title":"Configure PiHole","text":"Now we need to tell PiHole to use unbound as an upstream DNS server.
This is done by editing /etc/pihole/setupVars.conf
and adding/replacing the following line:
PIHOLE_DNS_1=127.0.0.1#5335\nPIHOLE_DNS_2=127.0.0.1#5335\n
Restart PiHole
systemctl restart pihole-FTL.service\n
The PiHole web interface should now show under /admin/settings.php?tab=dns
that the upstream DNS server is 127.0.0.1#5335
.
Under /admin/queries.php
you should see that the queries are now forwarded to 127.0.0.1#5335
.
If that is not the case, maybe you need to manually save the settings in the web interface under /admin/settings.php?tab=dns
.
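To verify from the CLI instead of the web interface, a quick check (a sketch; replace 192.168.2.19 with your PiHole's address): dig example.com @192.168.2.19\npihole -t\n The live log shown by pihole -t should list the query as forwarded to 127.0.0.1#5335.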
We want two PiHole instances that share the same IP address. If one instance fails, the other takes over the IP address.
They will also share the same gravity database so you only have to update the gravity database on one of the instances.
"},{"location":"Applications/PiHole/ha-setup/#why-do-we-want-this","title":"Why do we want this?","text":"If you have a PiHole instance running on a Raspberry Pi and it fails you will have to wait until you can fix it. This means manually changing the dns server on all your devices or trying to change the dhcp server to point to a different dns server. With this setup you will have a backup PiHole instance that will take over the ip address of the primary instance when it fails.
"},{"location":"Applications/PiHole/ha-setup/#requirements","title":"Requirements","text":"pihole -v
, update with pihole -up
pihole -r
to get them to work properlysudo apt install keepalived\n
"},{"location":"Applications/PiHole/ha-setup/#configure-keepalived","title":"Configure keepalived","text":"Script to check if local instance is running
cat << 'EOF' > /usr/local/bin/check-local-pihole\n#!/bin/sh\n\n# Exits 0 if a pihole-FTL process is running, non-zero otherwise;\n# the assignment's exit status is that of the command substitution.\nRUNNING=$(ps -aux | grep pihole-FTL | grep -v grep)\nexit $?\nEOF\n\nchmod +x /usr/local/bin/check-local-pihole\n
"},{"location":"Applications/PiHole/ha-setup/#configure-keepalived-on-the-primary-instance","title":"Configure keepalived on the primary instance","text":"cat << EOF > /etc/keepalived/keepalived.conf\nvrrp_script chk_local_pihole {\n script \"/usr/local/bin/check-local-pihole\" # (1)!\n interval 5\n weight -100\n}\n\nglobal_defs {\n router_id pihole-01 # (2)!\n script_user root\n enable_script_security\n}\n\nvrrp_instance PIHOLE {\n state MASTER # (3)!\n interface eth0 # (4)!\n virtual_router_id 20 # (5)!\n priority 150\n advert_int 1\n unicast_src_ip 192.168.3.21 # (6)!\n unicast_peer {\n 192.168.3.22 # (7)!\n }\n\n authentication {\n auth_type PASS\n auth_pass piholedns # (8)!\n }\n\n virtual_ipaddress {\n 192.168.3.20/23 # (9)!\n }\n\n track_script {\n chk_local_pihole\n }\n}\nEOF\n
cat << EOF > /etc/keepalived/keepalived.conf\nvrrp_script chk_local_pihole {\n    script \"/usr/local/bin/check-local-pihole\"\n    interval 1\n    weight -100\n}\n\nglobal_defs {\n    router_id pihole-02\n    script_user root\n    enable_script_security\n}\n\nvrrp_instance PIHOLE {\n    state BACKUP\n    interface eth0\n    virtual_router_id 20\n    priority 140\n    advert_int 1\n    unicast_src_ip 192.168.3.22\n    unicast_peer {\n        192.168.3.21\n    }\n\n    authentication {\n        auth_type PASS\n        auth_pass piholedns\n    }\n\n    virtual_ipaddress {\n        192.168.3.20/23\n    }\n\n    track_script {\n        chk_local_pihole\n    }\n}\nEOF\n
"},{"location":"Applications/PiHole/ha-setup/#start-keepalived","title":"Start keepalived","text":"Run on both instances
systemctl enable --now keepalived.service\n
"},{"location":"Applications/PiHole/ha-setup/#test-keepalived","title":"Test keepalived","text":"ip a
on both instances or looking at the pihole dashboard in the top right cornerapt update && apt install sqlite3 sudo git cron rsync ssh\n
"},{"location":"Applications/PiHole/ha-setup/#install-gravity-sync-script","title":"Install gravity sync script","text":"We will use gravity-sync to sync the gravity database between the two instances.
Install gravity-sync on both instances and follow the instructions.
curl -sSL https://raw.githubusercontent.com/vmstan/gs-install/main/gs-install.sh | bash\n
You can always reset the configuration with gravity-sync config
Run the following command on the primary instance to push the gravity database to the secondary instance.
gravity-sync push\n
"},{"location":"Applications/PiHole/ha-setup/#automate-gravity-database-sync","title":"Automate gravity database sync","text":"Run the following command on both instances to create a systemd timer that will sync the gravity database every 5 minutes.
gravity-sync automate\n
You can check the status of the timer with systemctl status gravity-sync.timer
. And you can check the logs with journalctl -u gravity-sync.service
.
With gravity-sync automate hour
the timer will sync the gravity database every hour.
sudo apt update -y\nsudo apt install golang\n
Clone, compile & move the exporter to the correct location
git clone https://github.com/letsencrypt/unbound_exporter.git\ncd unbound_exporter\ngo build\nsudo install -o root -g root -m 0755 unbound_exporter /usr/local/bin/unbound-exporter\ncd ..\nrm -rf unbound_exporter\n
"},{"location":"Applications/PiHole/install-unbound-prometheus-exporter/#create-a-systemd-service","title":"Create a systemd service","text":"cat << EOF > /etc/systemd/system/unbound-exporter.service\n[Unit]\nDescription=Unbound Prometheus Exporter\nAfter=network.target\n\n[Service]\nType=simple\nUser=root\nGroup=root\nRestart=always\nExecStart=/usr/local/bin/unbound-exporter -web.listen-address \":9167\" -web.telemetry-path \"/metrics\"\n\n[Install]\nWantedBy=multi-user.target\nEOF\n
"},{"location":"Applications/PiHole/install-unbound-prometheus-exporter/#start-the-service","title":"Start the service","text":"sudo systemctl daemon-reload\nsudo systemctl enable --now unbound-exporter.service\n
"},{"location":"Applications/PiHole/install-unbound-prometheus-exporter/#test-the-exporter","title":"Test the exporter","text":"curl localhost:9167/metrics\n
"},{"location":"Blog/Misc/blog-gh-pages-mkdocs/","title":"How to create a blog with GitHub Pages and MkDocs","text":""},{"location":"Blog/Misc/blog-gh-pages-mkdocs/#dockerfile","title":"Dockerfile","text":"Create the Containerfile at Dockerfile
or Containerfile
.
FROM docker.io/ubuntu:focal\n\nRUN : \\\n    && apt-get update -y \\\n    && apt-get install -y --no-install-recommends \\\n        python3 \\\n        python3-venv \\\n        python3-pip \\\n    && rm -rf /var/lib/apt/lists/*\n\nWORKDIR /src\n\nCOPY requirements.txt .\nENV PATH=/venv/bin:$PATH\n\nRUN : \\\n    && python3 -m venv /venv \\\n    && python3 -m pip --no-cache-dir install -r requirements.txt\n\nCOPY . .\n\nWORKDIR /src/blog\n
"},{"location":"Blog/Misc/blog-gh-pages-mkdocs/#taskfile","title":"Taskfile","text":"To store some reoccuring tasks we use a Taskfile. To install Task use this link or just use sudo sh -c \"$(curl --location https://taskfile.dev/install.sh)\" -- -d -b /usr/local/bin
Create the Taskfile.yml
.
# https://taskfile.dev\n\nversion: \"3\"\n\nvars:\n CONTAINER_NAME: blog.rwxd.eu\n CURRENT_DIR:\n sh: pwd\n SITE_DIR: \"{{.CURRENT_DIR}}/docs/site\"\n\ntasks:\n default:\n cmds:\n - task -l\n silent: true\n\n setup:\n desc: Setup requirements\n cmds:\n - python3 -m pip install -r requirements.txt -q\n - pre-commit install\n silent: true\n\n image:\n desc: builds container image with name blog.rwxd.eu\n cmds:\n - podman build -t {{.CONTAINER_NAME}} -f ./Containerfile\n silent: true\n\n serve:\n desc: Serve blog with a container\n vars:\n PORT: 8000\n MOUNT: \"{{.CURRENT_DIR}}/src\"\n cmds:\n - task: image\n - podman run --rm -p {{.PORT}}:8000 -v ./:/src {{.CONTAINER_NAME}} mkdocs serve\n silent: true\n\n serve-local:\n desc: Serve blog local\n dir: ./blog\n cmds:\n - mkdocs serve\n silent: true\n\n build:\n desc: Build blog pages\n cmds:\n - task: image\n - mkdir -p {{.SITE_DIR}}\n - podman run --rm -v {{.SITE_DIR}}:/src/blog/site {{.CONTAINER_NAME}} sh -c \"mkdocs build\"\n
"},{"location":"Blog/Misc/pluralsight_trial/","title":"Pluralsight demo / trial","text":"Create a new Pluralsight Account with a one month demo trough Visual Studio Dev Essentials.
sudo apt install sane sane-utils scanbd\n
"},{"location":"Blog/Misc/sane-scanbd-canon-5600f/#configuration","title":"Configuration","text":"Copy sane configuration to scanbd.
cp -r /etc/sane.d/* /etc/scanbd/sane.d/\n
Modify /etc/sane.d/dll.conf
so that only net
is uncommented in the configuration.
# genesys\nnet\n# canon\n
Test if the scanner is detected
SANE_CONFIG_DIR=/etc/scanbd scanimage -L\n
root@scanner:/opt/insaned# SANE_CONFIG_DIR=/etc/scanbd scanimage -L\ndevice 'genesys:libusb:001:004' is a Canon CanoScan 5600F flatbed scanner\n
"},{"location":"Blog/Misc/sane-scanbd-canon-5600f/#start-enable-the-service","title":"Start & enable the service","text":"sudo systemctl start scanbd\nsudo systemctl enable scanbd\n
"},{"location":"Blog/Misc/sane-scanbd-canon-5600f/#edit-the-button-configuration","title":"Edit the button configuration","text":"/etc/scanbd/scanbd.conf
The scan
action runs the test.script
by default. The path of the script or its content can be changed.
action scan {\n filter = \"^scan.*\"\n numerical-trigger {\n from-value = 1\n to-value = 0\n }\n desc = \"Scan to file\"\n script = \"/usr/local/bin/scan-to-share\"\n }\n
At the bottom of the file, the device-specific configuration is included:
# devices\n# each device can have actions and functions, you can disable not relevant devices\ninclude(scanner.d/canon.conf)\n
"},{"location":"Blog/Misc/sane-scanbd-canon-5600f/#debugging","title":"Debugging","text":"systemctl stop scanbd\nSANE_CONFIG_DIR=/etc/scanbd scanbd -f\n
More verbose:
systemctl stop scanbd\nSANE_CONFIG_DIR=/etc/scanbd scanbd -f -d7\n
"},{"location":"Blog/Misc/sane-scanbd-canon-5600f/#scan-script","title":"Scan script","text":"#!/usr/bin/env bash\nset -x -e -o pipefail\n\nlog_file=\"/var/scans/scan.log\"\necho \"Starting script\" >> \"$log_file\"\n\nresolution=300\nfile_ending=jpg\nformat=jpeg\nmode=color\n\nfile_data=$(date +'%Y_%m_%d-%H_%M_%S')\nfilename=\"$file_data.$file_ending\"\ntemp_path=\"/tmp/$filename\"\ndest_path=\"/var/scans/scanned/$file_data.pdf\"\n\necho \"Destination path \\\"$dest_path\\\"\" >> \"$log_file\"\necho \"Starting scan with resolution $resolution, format $format & mode $mode\" >> \"$log_file\"\n\nexport SANE_CONFIG_DIR=/etc/scanbd\nscanimage --format \"$format\" --resolution=\"$resolution\" --mode \"$mode\" -v -p > \"$temp_path\"\nimg2pdf \"$temp_path\" -o \"$dest_path\"\nrm \"$temp_path\"\nchmod 777 \"$dest_path\"\n
"},{"location":"DevOps/Continuous-Integration/Ansible/ansible-runner/","title":"ansible-runner","text":""},{"location":"DevOps/Continuous-Integration/Ansible/ansible-runner/#usage","title":"Usage","text":"Run with docker as process isolation
ansible-runner run demo -m debug --hosts localhost -a msg=hello --container-image quay.io/ansible/awx-ee -vvvv --process-isolation --process-isolation-executable=docker
The molecule project helps to develop and test Ansible roles.
python3 -m pip install molecule
Generate a new role molecule init role <name>
Init in existing role molecule init scenario
List drivers molecule drivers
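A typical development loop uses the standard molecule subcommands: molecule create\nmolecule converge\nmolecule verify\nmolecule destroy\n molecule test runs the full cycle (destroy, create, converge, verify, destroy) in one go.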
- name: Wait for port 22\n wait_for:\n host: \"{{ ansible_host }}\"\n port: 22\n state: started\n delay: 10\n sleep: 1\n connect_timeout: 5\n timeout: 900\n delegate_to: 127.0.0.1\n
"},{"location":"DevOps/Continuous-Integration/GitLab-CICD/clear_artifacts/","title":"Script to clear GitLab CI/CD Artifacts","text":"import requests\nimport json\n\nclass BearerAuth(requests.auth.AuthBase):\n def __init__(self, token):\n self.token = token\n def __call__(self, r):\n r.headers[\"authorization\"] = \"Bearer \" + self.token\n return r\n\nproject = '804'\ntoken='ijuiosjdiof'\n\nfor page in range(1, 200):\n url = f'https://gitlab.com/api/v4/projects/{project}/jobs?per_page=100&page={page}'\n print(f'Getting jobs from {url}')\n response = requests.get(url, auth=BearerAuth(token))\n\n data= json.loads(response.text)\n\n for item in data:\n url=f'https://gitlab.com/api/v4/projects/{project}/jobs/{item[\"id\"]}/artifacts'\n print(f'Running on {url}')\n response = requests.delete(url, auth=BearerAuth(token))\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-commands/","title":"Docker commands","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-commands/#stop-things","title":"Stop things","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-commands/#stop-all-containers","title":"Stop all containers","text":"docker stop $(docker ps -aq)\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-commands/#remove-things","title":"Remove things","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-commands/#remove-all-containers","title":"Remove all containers","text":"docker rm $(docker ps -aq)\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-commands/#remove-stop-all-containers","title":"Remove & stop all containers","text":"docker rm -f $(docker ps -aq)\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-commands/#all-images","title":"All Images","text":"docker rmi $(docker images -q)\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-commands/#start-docker-daemon-in-debug-mode","title":"Start docker daemon in debug mode","text":"sudo dockerd --debug\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-ipv6/","title":"Docker with IPv6","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-ipv6/#setup","title":"Setup","text":"Test with a busybox container docker run --rm -it busybox sh
```Dockerfile
"},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/ssh-keys/#install-pip-requirements","title":"install pip requirements","text":"SHELL [\"/bin/bash\", \"-o\", \"pipefail\", \"-c\"] RUN : \\ && eval \"$(ssh-agent -s)\"\\ && mkdir -p /root/.ssh \\ && chmod 0700 /root/.ssh \\ && echo ${GITLAB_SSH_PRIVATE_KEY} | base64 -d >> /root/.ssh/id_rsa \\ && chmod 0700 /root/.ssh/id_rsa \\ && ssh-add /root/.ssh/id_rsa \\ && ssh-keyscan gitlab.com >> /root/.ssh/known_hosts \\ && chmod 0644 /root/.ssh/known_hosts \\ && python3 -m venv /venv \\ && python3 -m pip install --no-cache-dir -r requirements.txt \\ && rm -f /root/.ssh/id_rsa
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/ckad/","title":"CKAD - Certified Kubernetes Application Developer","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/ckad/#books","title":"Books","text":"curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode 644\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/K3S/Install%20K3S%20on%20Raspberry%20PIs/#without-traefik","title":"Without Traefik","text":"curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC=\"--no-deploy traefik\" sh -s - --write-kubeconfig-mode 644\n
Get a token for the worker nodes
sudo cat /var/lib/rancher/k3s/server/token\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/K3S/Install%20K3S%20on%20Raspberry%20PIs/#worker-node","title":"Worker Node","text":"Set the K3S Token
export K3S_TOKEN=blablabla\n
curl -sfL https://get.k3s.io | K3S_URL=https://manager01.fritz.box:6443 K3S_TOKEN=$K3S_TOKEN sh -\n
Generate kubeconfig
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/K3S/Install%20K3S%20on%20Raspberry%20PIs/#create-a-service-account-for-kubectl","title":"Create a Service Account for kubectl","text":"kubectl -n default apply -f - <<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: home-computer\nEOF\n\nkubectl -n default apply -f - <<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: gitlab-service-account-role-binding\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: cluster-admin\nsubjects:\n - kind: ServiceAccount\n name: home-computer\n namespace: default\nEOF\n\nkubectl describe secret home-computer-token\n\nexport K8S_SERVER=\"https://192.168.2.31:6443\"\nexport K8S_CLUSTER=\"k3s-home\"\nexport K8S_USER=\"home-computer\"\nexport K8S_USER_TOKEN=\"blabla\" \n\nkubectl config set-cluster $K8S_CLUSTER --server=$K8S_SERVER --insecure-skip-tls-verify=true\nkubectl config set-credentials $K8S_USER --token=$K8S_USER_TOKEN\nkubectl config set-context $K8S_CLUSTER --cluster=$K8S_CLUSTER --user=$K8S_USER\nkubectl config use-context $K8S_CLUSTER\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/K3S/raspberry/","title":"K3s on Raspberry","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/K3S/raspberry/#errors","title":"Errors","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/K3S/raspberry/#failed-to-find-memory-cgroup-you-may-need-to-add","title":"Failed to find memory cgroup, you may need to add...","text":"Solution
sudo vim /boot/firmware/cmdline.txt\n
Add cgroup_enable=cpuset cgroup_enable=memory cgroup_memory=1
into end of the file.
The hybrid mode works for the control plane and kubernetes service https://kube-vip.io/hybrid/
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/Networking/metallb/","title":"MetalLB","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/Networking/metallb/#install-with-kubectl","title":"Install with kubectl","text":"https://metallb.universe.tf/installation/
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/namespace.yaml kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/metallb.yaml\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/Networking/metallb/#config","title":"config","text":"apiVersion: v1\nkind: ConfigMap\nmetadata:\n namespace: metallb-system\n name: config\ndata:\n config: |\n address-pools:\n - name: default\n protocol: layer2\n addresses:\n - 192.168.3.200-192.168.3.250\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/Networking/metallb/#install-with-terraform","title":"Install with Terraform","text":"resource \"kubernetes_namespace\" \"metallb\" {\n metadata {\n name = \"metallb\"\n }\n}\n\nresource \"helm_release\" \"metallb\" {\n name = \"metallb\"\n repository = \"https://metallb.github.io/metallb\"\n chart = \"metallb\"\n namespace = \"metallb\"\n\n depends_on = [kubernetes_namespace.metallb]\n\n set {\n name = \"configInline.address-pools[0].name\"\n value = \"default\"\n type = \"string\"\n }\n\n set {\n name = \"configInline.address-pools[0].protocol\"\n value = \"layer2\"\n type = \"string\"\n }\n\n set {\n name = \"configInline.address-pools[0].addresses[0]\"\n value = \"192.168.3.200-192.168.3.250\"\n type = \"string\"\n }\n}\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/Tanzu-Kubernetes-Cluster/kubectl-vsphere/","title":"Kubectl vSphere","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/Tanzu-Kubernetes-Cluster/kubectl-vsphere/#usage","title":"Usage","text":"Login to a cluster ``
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/Tanzu-Kubernetes-Cluster/kubectl-vsphere/#links","title":"Links","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/kubectl/CronJob/","title":"Test CronJob","text":"kubectl create job --from=cronjob/<name> <new-pod-name>
kubectl create job --from=cronjob/check-job test-job-1
kubectl run -i --tty --rm debug --image=busybox --restart=Never -- sh
NAMESPACE=test && kubectl get pods -n $NAMESPACE | grep Error | cut -d' ' -f 1 | xargs kubectl delete pod -n $NAMESPACE\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/kubectl/label/","title":"nodes","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/kubectl/label/#add-node-label","title":"add node label","text":"kubectl label node node01 node-role.kubernetes.io/name
kubectl label node node01 node-role.kubernetes.io/name-
gitlab-service-account.yml with ClusterRoleBinding
---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: gitlab-service-account\n\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: gitlab-service-account-role-binding\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: cluster-admin\nsubjects:\n - kind: ServiceAccount\n name: gitlab-service-account\n namespace: default\n
gitlab-service-account.yml with RoleBinding
---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: gitlab-service-account\n namespace: <KUBE_NAMESPACE>\n\n---\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: cicd-role\n namespace: <KUBE_NAMESPACE>\nrules:\n- apiGroups:\n - \"\"\n - apps\n - extensions\n resources:\n - '*'\n verbs:\n - '*'\n\n---\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: cicd-role\n namespace: <KUBE_NAMESPACE>\nsubjects:\n - kind: ServiceAccount\n name: gitlab-service-account\nroleRef:\n kind: Role\n name: cicd-role\n apiGroup: rbac.authorization.k8s.io\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/kubectl/serviceaccount/#get-the-created-token","title":"Get the created token","text":"kubectl -n <KUBE_NAMESPACE> describe secret gitlab-service-account-token-\n
export K8S_SERVER=\"https://10.24.1.1:6443\"\nexport K8S_CLUSTER=\"gitlab-test\"\nexport K8S_USER=\"gitlab-service-account\"\nexport K8S_USER_TOKEN=\"\"\n\nkubectl config set-cluster $K8S_CLUSTER --server=$K8S_SERVER --insecure-skip-tls-verify=true\nkubectl config set-credentials $K8S_USER --token=$K8S_USER_TOKEN\nkubectl config set-context $K8S_CLUSTER --cluster=$K8S_CLUSTER --user=$K8S_USER\nkubectl config use-context $K8S_CLUSTER\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/migrate-compose-to-kubefiles/","title":"Migrate from podman-compose to Kubefiles","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/migrate-compose-to-kubefiles/#overview","title":"Overview","text":"Kubefiles are a way to define a podman pod and containers in a single file. They are similar to docker-compose files, but can also be used with Kubernetes.
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/migrate-compose-to-kubefiles/#requirements","title":"Requirements","text":"The podman-compose or docker-compose file must be started with podman-compose up -d
and the created podman pod should be listed with podman pod ls
.
Get the pod name via podman pod ls
and generate the Kubefile with:
podman kube generate <pod_name> -f pod.kube.yaml\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/migrate-compose-to-kubefiles/#persistent-volume-claim","title":"Persistent Volume Claim","text":"Get the volume name via podman volume ls
and generate the Kubefile with:
podman kube generate <volume_name> -f pvc.kube.yaml\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-compose-systemd-files/","title":"Use systemd files with rootless podman-compose","text":"Currently (as of 6/15/2023), podman-compose must be manually installed to use version 1.0.7 (check with podman-compose -v), because pods are not used by default.
pip3 install git+https://github.com/containers/podman-compose.git\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-compose-systemd-files/#setup","title":"Setup","text":"Add the rootless podman user to the systemd-journal group to watch logs.
usermod -aG systemd-journal podman\n
Create the systemd podman-compose unit with root permissions
sudo podman-compose systemd --action create-unit\nsudo systemctl daemon-reload\n
Change to the directory where your podman-compose file resides.
Register the project
podman-compose systemd --action register\n\n# or with a different file name than podman-compose.yaml\npodman-compose -f docker-compose.yaml systemd --action register\n
Enable and start the systemd service
systemctl --user enable --now 'podman-compose@project-name'\n
Stop & Start
systemctl --user stop 'podman-compose@project-name'\nsystemctl --user start 'podman-compose@project-name'\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-compose-systemd-files/#troubleshooting","title":"Troubleshooting","text":"When the systemd unit is created you can use
podman pod ls\n\npodman pod inspect pod_project-name\n\nsystemctl --user status -l podman-compose@project-name\n\njournalctl --user -xu podman-compose@project-name\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-quadlets/","title":"Podman Quadlets","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-quadlets/#pre-requisites","title":"Pre-requisites","text":"When using rootless podman a directory under the user's home directory must be created for the quadlet files.
mkdir -p ~/.config/containers/systemd\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-quadlets/#container","title":"Container","text":"A container quadlet file must end with .container
in the ~/.config/containers/systemd
directory.
Example quadlet file to run a deluge container (deluge.container
file):
[Install]\nWantedBy=default.target\n\n[Unit]\nAfter=mullvadvpn.service\n\n[Container]\nImage=docker.io/linuxserver/deluge:latest\nVolume=/opt/container/deluge/downloads/:/downloads\nVolume=/opt/container/deluge/config/:/cofnig\n\n[Service]\n# Restart service when sleep finishes\nRestart=always\n# Extend Timeout to allow time to pull the image\nTimeoutStartSec=900\n
All the options for the quadlet file can be found in the podman documentation.
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-quadlets/#start","title":"Start","text":"systemctl --user daemon-reload\nsystemctl --user start deluge\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-quadlets/#logs","title":"Logs","text":"podman logs systemd-deluge\n\njournactl -f | grep deluge\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-quadlets/#pods","title":"Pods","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/setup-rootless/","title":"Podman rootless setup","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/setup-rootless/#install-podman","title":"Install podman","text":"dnf install -y podman podman-docker\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/setup-rootless/#enable-low-ports","title":"Enable low ports","text":"if ! grep -q \"net.ipv4.ip_unprivileged_port_start=80\" /etc/sysctl.conf; then echo \"net.ipv4.ip_unprivileged_port_start=80\" >> /etc/sysctl.conf; fi\n\n# Reload sysctl\nsysctl --system\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/setup-rootless/#create-user","title":"Create user","text":"useradd -m -s /bin/bash container\nsudo -iu container\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/setup-rootless/#create-podman-socket","title":"Create podman socket","text":"if ! grep -q \"loginctl enable-linger\" ~/.bashrc; then echo \"loginctl enable-linger $(whoami)\" >> ~/.bashrc; fi\nif ! grep -q \"$temp\" ~/.bashrc; then echo \"XDG_RUNTIME_DIR=/run/user/$(id -u)\" >> ~/.bashrc; fi\nsource ~/.bashrc\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/additional/Traefik/Excluding/","title":"Excluding","text":"services:\n whoami:\n image: \"traefik/whoami\"\n container_name: \"whoami-test\"\n labels:\n - \"traefik.enable=true\"\n - \"traefik.http.routers.whoami-test.rule=Host(`whoami-test.fritz.box`)\"\n - \"traefik.http.routers.whoami-test.entrypoints=http\"\n - \"traefik.http.routers.whoami-test.middlewares=intern_whitelist\"\n - \"traefik.http.middlewares.intern_whitelist.ipwhitelist.sourcerange=192.168.2.0/23\"\n - \"traefik.http.middlewares.intern_whitelist.ipwhitelist.ipstrategy.excludedips=192.168.2.1, 192.168.2.124\"\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/additional/Traefik/External%20Services/","title":"External Services","text":"http:\n routers:\n intern: {}\n entryPoints:\n - \"http\"\n - \"https\"\n rule: \"Host(`HostRegexp(`fritz.box`, `{subdomain:[a-z]+}.fritz.box`, ...)`)\"\n pihole:\n entryPoints:\n - \"http\"\n - \"https\"\n rule: \"Host(`pihole.fritz.box`)\"\n service: pihole\n middlewares:\n - addprefix-pihole\n services:\n pihole:\n loadBalancer:\n servers:\n - url: \"http://192.168.2.19:80\"\n passHostHeader: true\n middlewares:\n addprefix-pihole:\n addPrefix:\n prefix: \"/admin\"\n
"},{"location":"DevOps/Infrastructure-as-Code/ArgoCD/Setup%20Argo%20CD%20with%20a%20Helm%20Charts%20Repository/","title":"Install Argo CD","text":"Getting Started Guide
kubectl create namespace argocd \nkubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml\n
Download Argo CD CLI
Create a load balancer to use the API Server
kubectl patch svc argocd-server -n argocd -p '{\"spec\": {\"type\": \"LoadBalancer\"}}'\n
Get the initial admin secrets
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath=\"{.data.password}\" | base64 -d\n
"},{"location":"DevOps/Infrastructure-as-Code/ArgoCD/cli-cluster-login/","title":"Login with the ArgoCD Cli in the current cluster","text":""},{"location":"DevOps/Infrastructure-as-Code/ArgoCD/cli-cluster-login/#prerequisites","title":"Prerequisites","text":"# change the default namespace of your current context to argocd\nkubectl config set-context --current --namespace=argocd\n\nargocd login --core\n
Check for access to the API Server
argocd app list\n
"},{"location":"Math/Books/konkrete-mathematik-mit-python/","title":"Bearbeitung der Aufgaben von Konkrete Mathematik mit Python
","text":""},{"location":"Math/Books/konkrete-mathematik-mit-python/#15","title":"15","text":"def potenz(a: int, b: int) -> int:\n p = 1\n while b > 0:\n p *= a\n b -= 1\n return p\n\nassert potenz(2, 2) == 4\nassert potenz(3, 3) == 27\nassert potenz(3, 0) == 1\n
"},{"location":"Math/Books/konkrete-mathematik-mit-python/#17","title":"17","text":""},{"location":"Math/Discrete%20Math/Sets/","title":"Set-builder notation","text":"The set of all \\(x\\) in \\(S\\) such that \\(P(x)\\) is true -> \\(\\{ x \\in S | P(x) \\}\\)
"},{"location":"Misc/jwt-analyzing/","title":"Show content of a JWT token","text":"jwt=\"ey....\"\njq -R 'split(\".\") | .[1] | @base64d | fromjson' <<< \"$jwt\"\n
"},{"location":"Misc/openvpn-container-iptables-error/","title":"Problems with iptables in a OpenVPN container; leaking the real IP","text":"I was using the container \"dperson/openvpn-client:latest\" in combination with a deluge container. Which has the --net=container:vpn
option to use the same network stack as the vpn container.
By using the website https://ipleak.net/ I noticed that my real IP was leaking while testing the torrent client. The torrent client was listed with the VPN ip and the real ip. Using curl ipinfo.io
showed only the VPN ip.
The host is an AlmaLinux 9.2.
In the container logs where the following lines:
> docker logs vpn | grep \"ip\\dtables\"\ntables v1.8.4 (legacy): can't initialize iptables table `filter': Table does not exist (do you need to insmod?)\nPerhaps iptables or your kernel needs to be upgraded.\n
IP tables version on the host and in the container:
> iptables -V\niptables v1.8.8 (nf_tables)\n\n> docker exec vpn iptables -V\niptables v1.8.4 (legacy)\n
So the container was using the legacy iptables version. Also visible in the Dockerfile.
The nftables_nat modules are loaded, but the legacy iptables_nat modules are not.
> lsmod | grep nf_nat\nnf_nat 57344 3 xt_nat,nft_chain_nat,xt_MASQUERADE\n\n> lsmod | grep \"^ip\\w*table_nat\"\n
So we can load the legacy modules with modprobe.
> modprobe iptable_nat\n> modprobe ip6table_nat\n
> lsmod | grep \"^ip\\w*table_nat\"\n
Now the legacy modules are loaded and the error message is gone.
> docker restart vpn\n
Make the modules persistent.
touch /etc/modules-load.d/iptables_nat.conf\nprintf \"iptable_nat\\nip6table_nat\\n\" > /etc/modules-load.d/iptables_nat.conf\n
This solution also works when podman is used instead of docker.
"},{"location":"Networking/Cumulus%20Linux/about-cumulus-linux/","title":"About Cumulus Linux","text":""},{"location":"Networking/Misc/nfcapd/","title":"NFCAPD (NetFlow Capture Daemon)","text":""},{"location":"Networking/Misc/nfcapd/#show-running-captures","title":"Show running captures","text":"sudo ps -e -o command | grep nfcapd\n
"},{"location":"Networking/Misc/nfcapd/#edit-configuration","title":"Edit configuration","text":"Find the nfsen configuration first
sudo find / -type f -name \"nfsen.conf\"\n
vim /opt/etc/nfsen.conf\n
"},{"location":"Networking/Misc/nfcapd/#links","title":"Links","text":"The 95th percentile is a commonly used statistical measure to discard the top 5% of the highest values in a dataset. In network traffic calculation, it's a method used to determine the bandwidth billing rate, highlighting the regular usage while excluding rare usage spikes.
"},{"location":"Networking/Misc/traffic-billing/#how-it-works","title":"How it works","text":"def calculate_95th_percentile(data):\n data.sort() # Step 1: Sort the data\n index = 0.95 * len(data) # Step 2: Determine the 95th percentile index\n\n # Step 3: Get the value\n if index.is_integer():\n return data[int(index)-1] # Python indices are 0-based\n else:\n return data[int(round(index))-1]\n\n# Example data: Traffic measurements (in Mbps) every 5 minutes for a day (288 measurements for 24 hours)\ntraffic_data = [random.randint(50, 200) for _ in range(288)] # Random traffic data between 50 Mbps and 200 Mbps\n\npercentile_value = calculate_95th_percentile(traffic_data)\nprint(f\"95th Percentile Value: {percentile_value} Mbps\")\n
"},{"location":"Networking/NSX-T/NSX-T%20Links/","title":"NSX-T Links","text":"Network virtualization is the carving up of a single physical network into many virtual networks. Virtualizing a resource allows it to be shared by multiple users. Sharing allows the efficient use of a resource when no single user can utilize the entire resource.
Virtualization affords each user the illusion that they own the resource. In the case of virtual networks, each user is under the illusion that there are no other users of the network. To preserve the illusion, virtual networks are isolated from one another. Packets cannot accidentally leak from one virtual network to another.
"},{"location":"Networking/Virtualization/network-virtualization/#links","title":"Links:","text":"VXLAN (Virtual Extensible LAN) is a standard overlay protocol that abstracts logical virtual networks from the physical network underneath. With VXLAN simple and scalable layer 3 Clos architectures can be deployed, while extending layer 2 segments over that layer 3 network. VTEPs (VXLAN Tunnel Endpoints) are the tunnel edges.
VXLAN uses a VLAN-like encapsulation technique to encapsulate MAC-based layer 2 Ethernet frames within layer 3 UDP packets Each virtual network is a VXLAN logical layer 2 segment. VXLAN scales to 16 million segments - a 24-bit VXLAN network identifier (VNI ID) in the VXLAN header - for multi-tenancy.
In a large VXLAN deployment, two aspects need attention: 1. discovery of other endpoints (VTEPs) sharing the same VXLAN segments 2. avoidance of BUM frames (broadcast, unknown unicast and multicast) as they have to be forwarded to all VTEPs.
On Cumulus Linux each VXLAN is locally configured using a bridge for local virtual interfaces. The bridge is taking care of the local MAC addresses (notably, using source-address learning) and the VXLAN interface takes care of the remote MAC addresses (received with BGP EVPN).
"},{"location":"Networking/Virtualization/VXLAN/vxlan-general/#links","title":"Links:","text":"Extended IP access list TEST\n 2 permit ip host 10.10.10.1 host 10.10.10.2\n 3 permit ip host 10.10.10.3 host 10.10.10.4\n
"},{"location":"Networking/cisco/ios/acl/#command","title":"Command","text":"ip access-list resequence TEST 10 10\n
"},{"location":"Networking/cisco/ios/acl/#after","title":"After","text":"Extended IP access list TEST\n 10 permit ip host 10.10.10.1 host 10.10.10.2\n 20 permit ip host 10.10.10.3 host 10.10.10.4\n
"},{"location":"Networking/containerlab/cumulus/","title":"Cumulus on Containerlab","text":""},{"location":"Networking/containerlab/cumulus/#usage","title":"Usage","text":""},{"location":"Networking/containerlab/cumulus/#cvx","title":"CVX","text":"Container Image: docker.io/networkop/cx:5.1.0 Username: root Password: root
"},{"location":"Networking/containerlab/ssh/","title":"SSH into Containerlab devices","text":""},{"location":"Networking/containerlab/ssh/#ssh-config","title":"SSH Config","text":"$HOME/.ssh/config
host clab-*\n StrictHostKeyChecking no\n UserKnownHostsFile /dev/null\n
"},{"location":"Networking/vyos/install/","title":"Install vyos","text":""},{"location":"Networking/vyos/install/#download-live-image","title":"Download live image","text":"Nightly builds
"},{"location":"Networking/vyos/install/#install-permanent-on-disk","title":"Install permanent on disk","text":"vyos@vyos:~$ install image\n
"},{"location":"Operating-Systems/Android/apps/shelter/","title":"Shelter","text":"Shelter is a Free and Open-Source (FOSS) app that leverages the \"Work Profile\" feature of Android to provide an isolated space that you can install or clone apps into. https://github.com/PeterCxy/Shelter
"},{"location":"Operating-Systems/Android/apps/shelter/#links","title":"Links","text":"Install VcXsrv
Start XLaunch
with enabled clipboard and monitor 1
Set the Windows environment variable DISPLAY=\"127.0.0.1:1.0\"
Connect through SSH with the -Y
option.
Linux script to check working connection:
#!/usr/bin/env bash\n\nif ! timeout 3s xset q &>/dev/null; then\n echo \"No X server at \\$DISPLAY [$DISPLAY]\" >&2\n exit 1\nfi\n\necho \"Seems to work :)\"\n
"},{"location":"Operating-Systems/Linux/CLI/htpasswd/","title":"htpasswd","text":""},{"location":"Operating-Systems/Linux/CLI/htpasswd/#hash-bcrypt-with-input","title":"Hash BCrypt with input","text":"htpasswd -B -n username\n
"},{"location":"Operating-Systems/Linux/CLI/htpasswd/#run-with-a-container","title":"Run with a container","text":"docker run --rm -it httpd:latest htpasswd -B -n username\n
"},{"location":"Operating-Systems/Linux/CLI/iotop/","title":"iotop","text":""},{"location":"Operating-Systems/Linux/CLI/iotop/#watch-processes-accumulated","title":"Watch processes accumulated","text":"iotop -aoP\n
"},{"location":"Operating-Systems/Linux/CLI/nmap/","title":"NMAP","text":""},{"location":"Operating-Systems/Linux/CLI/nmap/#scan-network-for-host-with-open-ssh-port","title":"Scan network for host with open ssh port","text":""},{"location":"Operating-Systems/Linux/CLI/ssh/","title":"SSH","text":""},{"location":"Operating-Systems/Linux/CLI/ssh/#socks-proxy","title":"SOCKS Proxy","text":"ssh -D 1337 -C $USER@<target>\n
"},{"location":"Operating-Systems/Linux/CLI/tee/","title":"Tee","text":"With tee
it is possible to read from standard input and write to standard output and files (or commands) at the same time.
Log into file and stdout: foo | tee output.file
Append to a file: foo | tee -a output.file
Include stderr: foo 2>&1 | tee output.file
2>&1
redirects channel 2 (stderr/standard error) into channel 1 (stdout/standard output), such that both is written as stdout
Execute a program periodically, showing output in fullscreen.
"},{"location":"Operating-Systems/Linux/CLI/watch/#usage","title":"Usage","text":"watch du -sh file
Custom interval in seconds (defaults to every 2 seconds): watch -n 1 du -sh file
nmcli connection modify <name> connection.autoconnect yes\n
nmcli connection modify <name> 802-11-wireless-security.psk <psk>\n
nmcli connection up <name>\n
"},{"location":"Operating-Systems/Linux/Misc/Time/","title":"Time","text":""},{"location":"Operating-Systems/Linux/Misc/Time/#list-timezone","title":"List timezone","text":"timedatectl list-timezones
sudo timedatectl set-timezone Europe/Berlin
Install chronic
apt install moreutils\n
/etc/cron.d/01-example-cron
#!/usr/bin/env bash\nSHELL=/bin/bash\nPATH=/sbin:/bin:/usr/sbin:/usr/bin/usr/local/bin\nMAILTO=root,my-mail@example.org\n\n0 0 * * * root chronic /usr/local/bin/backup\n
"},{"location":"Operating-Systems/Linux/Misc/cryptsetup/","title":"Cryptsetup","text":"# format the disk with the luks structure\ncryptsetup luksFormat /dev/sda4\n\n# open the encrypted partition and map it to /dev/mapper/cryptroot\ncryptsetup luksOpen /dev/sda4 cryptroot\n\n# format as usual\nmkfs.ext4 -L nixos /dev/mapper/cryptroot\n
"},{"location":"Operating-Systems/Linux/Misc/window-names/","title":"Show window names","text":"Run the following command, after that click on a window to see its name
xprop | grep \"NAME\"\n
"},{"location":"Operating-Systems/Linux/Misc/window-names/#example","title":"Example","text":"\u276f xprop | grep \"NAME\"\nWM_NAME(STRING) = \"Spotify\"\n_NET_WM_NAME(UTF8_STRING) = \"Spotify\"\n
"},{"location":"Operating-Systems/Linux/Networking/Bridge%20Interface/","title":"Create a bridge interface","text":""},{"location":"Operating-Systems/Linux/Networking/Bridge%20Interface/#with-iproute2","title":"With iproute2","text":"Create a new bridge ip link add name bridge_name type bridge
Set interface to state up ip link set bridge_name up
Add an interface to the bridge (state of the interface must be up) ip link set eth0 master bridge_name
Verify bridge bridge link
Remove interace from a bridge ip link set eth0 nomaster
Edit file /etc/systemd/network/mybridge.network
[Match]\nName=br0\n\n[Network]\nDHCP=ipv4\n
Enable, start and reload systemd-networkd
sudo systemctl enable systemd-networkd\nsudo systemctl start systemd-networkd\nsudo systemctl reload systemd-networkd\n
"},{"location":"Operating-Systems/Linux/Networking/dns/","title":"DNS","text":""},{"location":"Operating-Systems/Linux/Networking/dns/#find-local-dns-resolver","title":"Find local DNS resolver","text":"sudo lsof -i :53 -S\n
"},{"location":"Operating-Systems/Linux/PulseAudio/Volume/","title":"PulseAudio Volume Stuff","text":""},{"location":"Operating-Systems/Linux/PulseAudio/Volume/#find-devices","title":"Find devices","text":"t=$(pacmd list-sinks && pacmd list-sinks && pacmd list-sources) && echo $t | grep \"name:\"
pacmd set-source-volume alsa_input.usb-Burr-Brown_from_TI_USB_Audio_CODEC-00.analog-stereo 0x25000
Install blueman
Launch the graphical settings with blueman-manager
put the following configuration into ~/.Xresources
Xft.dpi: 150\n
load settings
xrdb -merge ~/.Xresources\nexec i3\n
"},{"location":"Operating-Systems/Linux/Window-Manager/i3/i3-wallpaper/","title":"Wallpaper in i3","text":"feh can be used to display a wallpaper.
Define the following in the i3 config file to use a random wallpaper from the path ~/wallpaper/
.
exec --no-startup-id feh --bg-scale --random ~/wallpaper/\n
"},{"location":"Operating-Systems/Linux/Window-Manager/i3/spotify/","title":"Control Spotify in i3","text":"# spotify player controls\nbindsym XF86AudioPlay exec \"dbus-send --print-reply --dest=org.mpris.MediaPlayer2.spotify /org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player.PlayPause\"\nbindsym XF86AudioStop exec \"dbus-send --print-reply --dest=org.mpris.MediaPlayer2.spotify /org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player.Stop\"\nbindsym XF86AudioPrev exec \"dbus-send --print-reply --dest=org.mpris.MediaPlayer2.spotify /org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player.Previous\"\nbindsym XF86AudioNext exec \"dbus-send --print-reply --dest=org.mpris.MediaPlayer2.spotify /org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player.Next\"\n
"},{"location":"Operating-Systems/Linux/Window-Manager/i3/volume/","title":"Volume Control in i3","text":"A graphical control for PulseAudio is pavucontrol
.
# Use pactl to adjust volume in PulseAudio.\nset $refresh_i3status killall -SIGUSR1 i3status\nbindsym XF86AudioRaiseVolume exec --no-startup-id pactl set-sink-volume @DEFAULT_SINK@ +5% && $refresh_i3status\nbindsym XF86AudioLowerVolume exec --no-startup-id pactl set-sink-volume @DEFAULT_SINK@ -5% && $refresh_i3status\nbindsym XF86AudioMute exec --no-startup-id pactl set-sink-mute @DEFAULT_SINK@ toggle && $refresh_i3status\nbindsym XF86AudioMicMute exec --no-startup-id pactl set-source-mute @DEFAULT_SOURCE@ toggle && $refresh_i3status\n
"},{"location":"Operating-Systems/Linux/nix/nixpkgs/","title":"nixpkgs","text":""},{"location":"Operating-Systems/Linux/nix/nixpkgs/#get-github-checksums","title":"Get GitHub checksums","text":"nix-prefetch-url --unpack https://github.com/catppuccin/bat/archive/f0dedf515c02799b76a2804db9815a479f6c0075.zip\n
REPO=\"\"\n
rm -rf /tmp/repo-check\ngit clone --depth 1 \"$REPO\" /tmp/repo-check\ngit -C /tmp/repo-check rev-parse HEAD\nrm -rf /tmp/repo-check/.git\nnix hash path /tmp/repo-check\n
fetchFromGitHub {\n owner = \"owner\";\n repo = \"repo\";\n rev = \"65bb66d364e0d10d00bd848a3d35e2755654655b\";\n hash = \"sha256-8EUDsWeTeZwJNrtjEsUNLMt9I9mjabPRBZG83u7xtPw=\";\n}\n
"},{"location":"Operating-Systems/Linux/nix/nixpkgs/#build","title":"Build","text":"nix-build -E 'with import <nixpkgs> {}; callPackage ./default.nix {}'\n
"},{"location":"Operating-Systems/Linux/nix/nixpkgs/#test-install","title":"Test-Install","text":"nix-env -iA <package> -f <path to repo>\n
"},{"location":"Operating-Systems/Linux/nix/nixpkgs/#submitting-changes","title":"Submitting Changes","text":"https://nixos.org/manual/nixpkgs/stable/#chap-submitting-changes
"},{"location":"Operating-Systems/Linux/nix/nixpkgs/#maintainer","title":"Maintainer","text":"Add yourself to the nixpkgs/maintainers/maintainer-list.nix
file.
Format
handle = {\n # Required\n name = \"Your name\";\n email = \"address@example.org\";\n # Optional\n matrix = \"@user:example.org\";\n github = \"GithubUsername\";\n githubId = your-github-id;\n keys = [{\n longkeyid = \"rsa2048/0x0123456789ABCDEF\";\n fingerprint = \"AAAA BBBB CCCC DDDD EEEE FFFF 0000 1111 2222 3333\";\n }];\n};\n
"},{"location":"Operating-Systems/Linux/security/selinux/","title":"SELinux","text":""},{"location":"Operating-Systems/Linux/security/selinux/#commands","title":"Commands","text":"See SELinux booleans
getsebool -a\n
Get messages since 14:05
journalctl -t setroubleshoot --since=14:05\n
"},{"location":"Operating-Systems/Linux/security/selinux/#inspection","title":"Inspection","text":"Inspect a AVC message
sealert -l [message_ID]\n
"},{"location":"Operating-Systems/Linux/security/selinux/#flags","title":"Flags","text":"chcon\nrestorecron\n
"},{"location":"Operating-Systems/Linux/security/firewall/firewalld/","title":"firewalld","text":""},{"location":"Operating-Systems/Linux/security/firewall/firewalld/#zones","title":"Zones","text":""},{"location":"Operating-Systems/Linux/security/firewall/firewalld/#list-zones","title":"List Zones","text":"firewall-cmd --get-active-zones\n
"},{"location":"Operating-Systems/Linux/security/firewall/firewalld/#rules","title":"Rules","text":""},{"location":"Operating-Systems/Linux/security/firewall/firewalld/#ports","title":"Ports","text":"firewall-cmd --permanent --zone=public --add-port=25565/tcp --add-port=19132/udp\n
Port Range
firewall-cmd --permanent --zone=public --add-port=40000-40030/udp\n
"},{"location":"Operating-Systems/Linux/security/firewall/firewalld/#remove-ports","title":"Remove Ports","text":"firewall-cmd --permanent --zone=public --remove-port=25565/tcp --remove-port=19132/udp\n
Port Range
firewall-cmd --permanent --zone=public --remove-port=40000-40030/udp\n
"},{"location":"Operating-Systems/Linux/security/firewall/ufw/","title":"UFW","text":""},{"location":"Operating-Systems/Linux/security/firewall/ufw/#get-status","title":"Get status","text":"ufw status verbose\n
"},{"location":"Operating-Systems/Linux/security/firewall/ufw/#rules","title":"Rules","text":""},{"location":"Operating-Systems/Linux/security/firewall/ufw/#ports","title":"Ports","text":"ufw allow 22/tcp\n````\n\n```bash\nufw deny 80/tcp\n````\n\n### Remove Ports\n\n```bash\nufw delete allow 22/tcp\n
"},{"location":"Operating-Systems/Linux/security/firewall/ufw/#block-all-incoming-traffic","title":"Block all incoming traffic","text":"ufw default deny incoming\n
"},{"location":"Other/20%20rules%20of%20formulating%20knowledge/","title":"20 rules of formulating knowledge","text":"Effective learning: Twenty rules of formulating knowledge
"},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#1-do-not-learn-if-you-dont-understand","title":"1. Do not learn if you don't understand","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#2-learn-before-you-memorize","title":"2. Learn before you memorize","text":"You need to build an overall picture of the learned knowledge in order to memorize it. Do not start from memorizing loosely related facts!
"},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#3-build-upon-the-basics","title":"3. Build upon the basics","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#4-stick-to-the-minimum-information-principle","title":"4. Stick to the minimum information principle","text":"The material you learn must be formulated in as simple way at is is.
"},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#5-cloze-deletion-is-easy-and-effective","title":"5. Cloze deletion is easy and effective","text":"Cloze deletion is a sentence with its parts missing and replaced by three dots.
"},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#6-use-imagery","title":"6. Use Imagery","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#7-use-mnemonic-techniques","title":"7. Use mnemonic techniques","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#8-graphic-deletions-is-as-good-as-cloze-deletion","title":"8. Graphic deletions is as good as cloze deletion","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#9-avoid-sets","title":"9. Avoid sets","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#10-avoid-enumeration","title":"10. Avoid enumeration","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#11-combat-interference","title":"11. Combat interference","text":"When you learn about similar things you often confuse them. You need to make items as unambiguous as possible.
"},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#12-optimized-wording","title":"12. Optimized wording","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#13-refer-to-other-memories","title":"13. Refer to other memories","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#14-personalize-and-provide-examples","title":"14. Personalize and provide examples","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#15-rely-on-emotional-states","title":"15. Rely on emotional states","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#16-context-cues-simplify-wording","title":"16. Context cues simplify wording","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#17-redundancy-does-not-contradict-minimum-information-principle","title":"17. Redundancy does not contradict minimum information principle","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#18-provide-sources","title":"18. Provide sources","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#19-provide-data-stamping","title":"19. Provide data stamping","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#20-prioritize","title":"20 Prioritize","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#summary","title":"Summary","text":"import json\nimport urllib.request as requests\n\nconfig = {\n \"token\": \"blabla\",\n \"duck_domain\": \"cloud-test\",\n \"ipv4\": True,\n \"ipv6\": True\n }\n\nipv4URL = 'https://ipv4.ipleak.net/json/'\nipv6URL = 'https://ipv6.ipleak.net/json/'\n\nif config[\"ipv4\"]:\n request = requests.urlopen(ipv4URL)\n data = json.load(request)\n print(f'IPv4: {json.dumps(data[\"ip\"], indent=2)}')\n\n request = requests.urlopen(f'https://www.duckdns.org/update?domains={config[\"duck_domain\"]}&token={config[\"token\"]}&ip={data[\"ip\"]}')\n if request.status != 200:\n print(request.msg)\n\n\nif config[\"ipv6\"]:\n request = requests.urlopen(ipv6URL)\n data = json.load(request)\n print(f'IPv6: {json.dumps(data[\"ip\"], indent=2)}')\n\n request = requests.urlopen(f'https://www.duckdns.org/update?domains={config[\"duck_domain\"]}&token={config[\"token\"]}&ipv6={data[\"ip\"]}')\n if request.status != 200:\n print(request.msg)\n
"},{"location":"Other/digital-gardening/","title":"Digital Gardening","text":""},{"location":"Other/digital-gardening/#links","title":"Links","text":"A stack is a data structure used to store a collection of objects. Individual items can be added and stored in a stack using a push operation. Objects can be retrieved using a pop operation, which removes an item from the stack.
When an object is added to a stack, it is placed on the top of all previously entered items.
A stack from which items are removed from the top is called a "LIFO" - Last In, First Out. If items are instead removed from the bottom, the structure behaves as a "FIFO" - First In, First Out, i.e. a queue.
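A minimal sketch of these operations with a Bash array (illustrative only; assumes Bash 4.3+ for negative array indices):
stack=()\nstack+=(\"first\")    # push onto the top\nstack+=(\"second\")   # push onto the top\ntop=\"${stack[-1]}\"  # peek: \"second\"\nunset 'stack[-1]'   # pop from the top (LIFO)\nfront=\"${stack[0]}\" # a FIFO/queue would remove from this end instead\n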
"},{"location":"Programming-Languages/Bash/","title":"Bash","text":""},{"location":"Programming-Languages/Bash/#notes","title":"Notes","text":"#!/usr/bin/env bash
.~/.bashrc
doesn't have to be a mess{\n \"my_key\": \"my_string\" \n}\n
"},{"location":"Programming-Languages/JSON/#integer-and-floats","title":"Integer and floats","text":"{\n \"my_int\": 2,\n \"my_float\": 3.5\n}\n
"},{"location":"Programming-Languages/JSON/#array","title":"Array","text":"{\n \"my_list\": [\"test\", 5, \"test3\"]\n}\n
"},{"location":"Programming-Languages/JSON/#objects","title":"Objects","text":"{\n \"my_object\": {\n \"name\": \"Test Object\",\n \"childs\": [\n {\n \"name\": \"Child object 1\"\n }\n ]\n }\n}\n
"},{"location":"Programming-Languages/JSON/#links","title":"Links","text":""},{"location":"Programming-Languages/YAML/","title":"YAML","text":"YAML is commonly used for configuration files and in applications where data is being stored or transmitted.
Filenames can end with .yaml
or .yml
.
---\nkey: this is a string\n\nkey: \"this is also a string\"\n\nkey: |\n this is a multi-line\n string with line breaks\n\nkey: >\n this is a multi-line\n string without line breaks\n
"},{"location":"Programming-Languages/YAML/#integers-and-floats","title":"Integers and floats","text":"---\ninteger: 595\n\nfloat: 12.2\n
"},{"location":"Programming-Languages/YAML/#lists","title":"Lists","text":"---\nlist1: [1, \"two\", 3]\n\nlist2:\n - 1\n - \"two\"\n - 3\n
"},{"location":"Programming-Languages/YAML/#objects","title":"Objects","text":"---\nmy_obj:\n title: My Object\n description: This is a object\n childs:\n - test_obj:\n name: Test Object\n
"},{"location":"Programming-Languages/YAML/#comments","title":"Comments","text":"---\n# this is a comment\n
"},{"location":"Programming-Languages/YAML/#links","title":"Links","text":"GOOS=linux GOARCH=amd64 go build -v\n
"},{"location":"Programming-Languages/Go/Build/#windows","title":"Windows","text":"GOOS=windows GOARCH=amd64 go build -v\n
"},{"location":"Programming-Languages/Go/Build/#helpful-makefile","title":"Helpful Makefile","text":"PROJECT_NAME := \"test-project\"\nPKG := \"github.com/rwxd/$(PROJECT_NAME)\"\nPKG_LIST := $(shell go list ${PKG}/...)\nGO_FILES := $(shell find . -name '*.go' | grep -v _test.go)\n\nhelp:\n @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = \":.*?## \"}; {printf \"\\033[36m%-30s\\033[0m %s\\n\", $$1, $$2}'\n\nall: build\n\ntest: ## Test go code\n @go test -race ./...\n\ndep: ## Get the dependencies\n @go get -v -d ./...\n\nsetup: ## Install required things\n python3 -m pip install -r requirements-dev.txt\n pre-commit install\n\nbuild: dep build-linux build-windows ## Build for all platforms\n\nbuild-linux: dep ## Build for linux\n @mkdir -p build/\n @GOOS=linux GOARCH=amd64 go build -o build/ -v $(PKG)\n\nbuild-windows: dep ## Build for windows\n @mkdir -p build/\n @GOOS=windows GOARCH=amd64 go build -v -o build/ $(PKG)\n\nclean: ## Remove previous build\n @rm -rf build/\n
"},{"location":"Programming-Languages/Go/Concurrency/","title":"Concurrency in Go","text":""},{"location":"Programming-Languages/Go/Concurrency/#mutex","title":"Mutex","text":"Safely access data across multiple goroutines
func editFile(path string, mu *sync.Mutex){\n mu.Lock()\n defer mu.Unlock()\n // I/O stuff\n}\n
"},{"location":"Programming-Languages/Go/Formatting/","title":"Formatting in Go","text":"The Go development tools include a command, go fmt
, which automatically reformats your code to match the standard format.
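For example, run from the module root:
# reformat a single file in place\ngofmt -w main.go\n\n# reformat every package in the module\ngo fmt ./...\n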
import (\n \"log\"\n \"os\"\n \"runtime/pprof\"\n)\n\nfunc main() {\n f, err := os.Create(\"my-tool.prof\")\n if err != nil {\n log.Fatal(err)\n }\n pprof.StartCPUProfile(f)\n defer pprof.StopCPUProfile()\n\n // CPU Intensive code\n}\n
"},{"location":"Programming-Languages/Go/profiling/#view-data","title":"View data","text":"go tool pprof my-tool.prof\n
# view top 10 functions\n(pprof) top\n\n# view top 20 functions\n(pprof) top20\n\n# view top functions sorted by cumulative time\n(pprof) top --cum\n\n# Visualize graph through web browser\n(pprof) web\n\n# Output graph as an svg\n(pprof) svg\n
"},{"location":"Programming-Languages/Go/profiling/#memory-profiling","title":"Memory Profiling","text":"Go comes with a built-in profiling tool called pprof that can provide detailed information about your application's runtime memory usage.
"},{"location":"Programming-Languages/Go/profiling/#generate-data_1","title":"Generate data","text":"import _ \"net/http/pprof\"\n
Then, add the following code to start a new HTTP server that will serve the pprof endpoints:
go func() {\n log.Println(http.ListenAndServe(\"localhost:6060\", nil))\n}()\n
With the above setup, you can access various profiling data by navigating to http://localhost:6060/debug/pprof/ while your application is running. For memory-related insights, http://localhost:6060/debug/pprof/heap is of particular interest.
"},{"location":"Programming-Languages/Go/profiling/#capture-heap-dump","title":"Capture Heap Dump","text":"Once you have pprof set up and your application is running:
Allow your application to run until you suspect a memory leak. Capture a heap profile by executing:
curl -s http://localhost:6060/debug/pprof/heap -o mem.pprof\n
"},{"location":"Programming-Languages/Go/profiling/#analyze-data","title":"Analyze data","text":"go tool pprof mem.pprof\n
python3 -m cProfile -o log.pstats -m my_module\n
"},{"location":"Programming-Languages/Python/Performance%20Analysis/#visualisation","title":"Visualisation","text":""},{"location":"Programming-Languages/Python/Performance%20Analysis/#gprof2dot-dot-diagram","title":"gprof2dot (Dot Diagram)","text":"sudo pacman -S graphviz\npip3 install gprof2dot\n
gprof2dot -f pstats log.pstats | dot -Tsvg -o log.svg\n
"},{"location":"Programming-Languages/Python/profiling/","title":"Profiling in Python","text":""},{"location":"Programming-Languages/Python/profiling/#generate-data","title":"Generate data","text":""},{"location":"Programming-Languages/Python/profiling/#pythons-integrated-cprofile","title":"Pythons integrated cProfile
","text":"python3 -m cProfile -o profile.pstats -m my_module <args>\n
"},{"location":"Programming-Languages/Python/profiling/#yappi","title":"Yappi","text":"Yappi supports asynchronous and multithreaded profiling, which is not supported by the built-in profiler.
pip3 install -U yappi\n
import yappi\nfrom my_module import my_function\n\nyappi.start()\n\nmy_function()\n\nyappi.stop()\n\nyappi.get_func_stats().save(\"profile.pstats\", type=\"pstats\")\n
"},{"location":"Programming-Languages/Python/profiling/#visualisation","title":"Visualisation","text":""},{"location":"Programming-Languages/Python/profiling/#gprof2dot-dot-diagram-svg","title":"gprof2dot (Dot Diagram, SVG)","text":"Transform a .pstats
file with gprof2dot into a dot graph as an SVG file.
pip3 install -U gprof2dot\n
gprof2dot -f pstats profile.pstats | dot -Tsvg -o profile.svg\n
"},{"location":"Programming-Languages/Python/profiling/#snakeviz-interactive","title":"Snakeviz (Interactive)","text":"Snakeviz is a web-based profiling tool which allows users to analyse their code by filtering data by module, function and file, and sorting it according to different criteria such as the number of calls or cumulative time spent in a function.
pip3 install -U snakeviz\n
snakeviz profile.pstats\n
"},{"location":"Programming-Languages/Python/profiling/#flamegraph-svg","title":"flamegraph (SVG)","text":"Flame graphs are visual tools that show how much time is spent in each function call. The width of each bar in the graph represents the amount of time spent in that function, with wider bars indicating more time spent and narrower bars indicating less time. The main function is at the bottom, and the subfunctions are stacked vertically on top.
pip3 install -U flameprof\n
flameprof profile.pstats > profile.svg\n
"},{"location":"Programming-Languages/Python/typing-in-python/","title":"Typing in Python","text":"In Python typing can be optionally used. To check typing the standard tool is MyPy.
"},{"location":"Programming-Languages/Python/typing-in-python/#usage","title":"Usage","text":""},{"location":"Programming-Languages/Python/typing-in-python/#function-annotations","title":"Function annotations","text":"def func(arg: arg_type, optarg: arg_type = default) -> return_type: \n...\n
For arguments the syntax is argument: annotation
, while the return type is annotated using -> annotation
. Note that the annotation must be a valid Python expression.
Sometimes the type checker needs help in figuring out the types of variables as well. The syntax is similar:
pi: float = 3.142\n\ndef circumference(radius: float) -> float:\n return 2 * pi * radius\n
"},{"location":"Programming-Languages/Python/typing-in-python/#links","title":"Links","text":"Create a virtual environment python3 -m virtualenv .venv
or python3 -m venv .venv
Get current path
from pathlib import Path\nSTATIC_FILES_DIR = Path(__file__).parent.absolute()\n
"},{"location":"Programming-Languages/Python/Python-Libraries/Pathlib/#links","title":"Links","text":""},{"location":"Programming-Languages/Python/Python-Libraries/PyTest/","title":"PyTest","text":""},{"location":"Programming-Languages/Python/Python-Libraries/PyTest/#mocks","title":"Mocks","text":"For mocking with PyTest the unittest.mock
library is used.
import json\nimport falcon\nimport pytest\n\nimport backend.cta  # application module under test\n\n@pytest.fixture\ndef json_loader():\n \"\"\"Loads data from JSON file\"\"\"\n\n def _loader(filename):\n with open(filename, 'r') as f:\n data = json.load(f)\n return data\n\n return _loader\n\n\ndef test_wrong_stop(client, mocker, json_loader):\n # Arrange\n get_mock = mocker.MagicMock()\n get_mock.status_code = 200\n get_mock.json.return_value = json_loader(\n 'cta_error_incorrect_stop_response.json')\n mocker.patch.object(\n backend.cta.requests,\n 'get',\n return_value=get_mock,\n )\n\n # Act\n response = client.simulate_get('/stops/106')\n\n # Assert\n assert response.status == falcon.HTTP_200\n assert response.json == {'error': 'stop_id: 106 does not exist'}\n
"},{"location":"Programming-Languages/Python/Python-Libraries/PyTest/#links","title":"Links","text":"https://github.com/mkb79/audible-cli
"},{"location":"Tools/Audible-Cli/#setup","title":"Setup","text":""},{"location":"Tools/Audible-Cli/#authfile","title":"Authfile","text":"audible manage auth-file add --password \"<password>\"\n
"},{"location":"Tools/Audible-Cli/#profile","title":"Profile","text":"audible manage profile add\n
"},{"location":"Tools/Audible-Cli/#download-all-audio-books-to-the-current-directory","title":"Download all audio books to the current directory","text":"audible -P default -v info download --all --aax --ignore-podcasts --jobs 3 --ignore-errors\n
"},{"location":"Tools/Audible-Cli/#convert-aax-to-mp3","title":"Convert aax to mp3","text":"https://github.com/KrumpetPirate/AAXtoMP3
"},{"location":"Tools/Audible-Cli/#get-the-auth-token-from-audible-cli","title":"Get the auth token from audible-cli","text":"audible -P default activation-bytes\n
"},{"location":"Tools/Audible-Cli/#convert-aax-to-mp3_1","title":"Convert aax to mp3","text":"aaxtomp3 -e:mp3 --level 5 -s --authcode <authcode> --loglevel 1 <file.aax>\n
"},{"location":"Tools/Audible-Cli/#convert-all-aax-to-mp3","title":"Convert all aax to mp3","text":"find . -name \"*.aax\" -exec aaxtomp3 -e:mp3 --level 5 -s --authcode <authcode> --loglevel 1 --complete_dir <path> {} \\;\n
"},{"location":"Tools/Bitwarden/","title":"Bitwarden","text":"Bitwarden is a open source password manager with cloud synchronization and the option to host the sync server on your own hardware.
"},{"location":"Tools/Bitwarden/#links","title":"Links","text":"Use Jinja2 templating in boilerplates for new projects.
"},{"location":"Tools/Cookiecutter/#usage","title":"Usage","text":"python3 -m pip install cookiecutter\ncookiecutter gh:rwxd/cookiecutter-gh-project\n
"},{"location":"Tools/HashiCorp-Vault/","title":"HashiCorp Vault","text":"HashiCorp Vault can be used to store things like passwords, certificates and encryption keys.
"},{"location":"Tools/HashiCorp-Vault/#usage","title":"Usage","text":""},{"location":"Tools/HashiCorp-Vault/#cli","title":"CLI","text":"Login to a vault server with a token vault login -address=https://vault.net -method=token
List kv entries vault kv list network/services
Get a kv entry vault kv get network/services/ipam
Headscale is a self-hosted, open source implementation of the Tailscale control server.
"},{"location":"Tools/Headscale/#connect-a-client-to-the-server","title":"Connect a client to the server","text":""},{"location":"Tools/Headscale/#create-a-user","title":"Create a user","text":"In case you don't have a user yet, you can create one with the following command:
headscale users create <user>\n
"},{"location":"Tools/Headscale/#get-an-authkey-for-the-user","title":"Get an authkey for the user","text":"headscale --user <user> preauthkeys create --reusable --expiration 1h\n
"},{"location":"Tools/Headscale/#authenticate-tailscale-client","title":"Authenticate tailscale client","text":"tailscale up --login-server <headscale url> --authkey <authkey>\n
"},{"location":"Tools/Headscale/#check-status","title":"Check status","text":"tailscale status\n
"},{"location":"Tools/KeepassXC/","title":"KeePassXC","text":"KeePassXC is a open source password manager which uses a local password database file.
To sync the database with different devices an external cloud service like Dropbox or OneDrive is needed.
KeePassXC has the functionality to store SSH keys and inject the keys into the SSH agent.
"},{"location":"Tools/KeepassXC/#links","title":"Links","text":"MkDocs is a fast, simple and downright gorgeous static site generator that's geared towards building project documentation. Documentation source files are written in Markdown, and configured with a single YAML configuration file. -- https://www.mkdocs.org/
"},{"location":"Tools/MkDocs/#usage","title":"Usage","text":"View the website local with: mkdocs serve
.
Build the website with: mkdocs build
.
The default export directory is ./site
.
Task is a task runner / build tool.
The configuration is written in a Taskfile.yml
Taskfile Template
# https://taskfile.dev\n\nversion: '3'\n\nvars:\n GREETING: Hello, World!\n\ntasks:\n default:\n cmds:\n - echo \"{{.GREETING}}\"\n silent: true\n
"},{"location":"Tools/Task/#usage","title":"Usage","text":"Init a Taskfile template task --init
List tasks task -l
or task --list
Use vars at global or task level
vars:\n CONTAINER_NAME: wiki.rwxd.eu\n CURRENT_DIR:\n sh: pwd\n SITE_DIR: \"{{.CURRENT_DIR}}/site\"\n
"},{"location":"Tools/Task/#links","title":"Links","text":""},{"location":"Tools/autorestic/","title":"autorestic - High backup level CLI utility for restic.","text":"Documentation
The commands will work with the configuration saved to ~/.autorestic.yaml.
You can also specify a different config file with the -c
flag.
The --ci
flag is used for the exec
command to prevent colors from being printed.
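A minimal sketch of such a configuration (the location and backend names, paths and the backend type are assumptions):
cat << 'EOF' > ~/.autorestic.yaml\nlocations:\n  home:\n    from: /home/user\n    to: local-backup\nbackends:\n  local-backup:\n    type: local\n    path: /mnt/backup\nEOF\n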
autorestic check\n
"},{"location":"Tools/autorestic/#backup","title":"Backup","text":"# all\nautorestic backup --all\n\n# specific locations\nautorestic backup --locations \"<location1>,<location2>\"\n
"},{"location":"Tools/autorestic/#show-stats-for-a-backend","title":"Show stats for a backend","text":"autorestic --ci exec -vb <backend> stats\n
"},{"location":"Tools/autorestic/#show-snapshots-for-a-backend","title":"Show snapshots for a backend","text":"autorestic --ci exec -vb <backend> snapshots\n
"},{"location":"Tools/autorestic/#check-a-backend-for-errors","title":"Check a backend for errors","text":"autorestic --ci exec -vb <backend> check\n
"},{"location":"Tools/autorestic/#mount-repository-on-backend","title":"Mount repository on backend","text":"mkdir -p /mnt/restic\nautorestic --ci exec -vb <backend> mount -- /mnt/restic\n
"},{"location":"Tools/borg/","title":"Borg","text":""},{"location":"Tools/borg/#delete-directory-from-all-backups","title":"Delete directory from all backups","text":"Dry-Run
borg recreate <archive> --dry-run --list --verbose --exclude <path>\n
Delete
borg recreate <archive> --list --verbose --exclude <path>\n
"},{"location":"Tools/cht.sh/","title":"cht.sh","text":""},{"location":"Tools/cht.sh/#links","title":"Links","text":"sequenceDiagram\nAlice->>John: Hello John, how are you?\nloop Healthcheck\n John->>John: Fight against hypochondria\nend\nNote right of John: Rational thoughts!\nJohn-->>Alice: Great!\nJohn->>Bob: How about you?\nBob-->>John: Jolly good!\n
"},{"location":"Tools/openssl/","title":"Open SSL","text":""},{"location":"Tools/openssl/#generate-passwords","title":"Generate passwords","text":"openssl passwd -6 -salt xyz
Arch yay -S spicetify-cli
generate config spicetify
apply config
spicetify backup apply\nspicetify apply\n
change theme spicetify config current_theme THEME_NAME
change color scheme spicetify config color_scheme SCHEME_NAME
when Spotify is installed through AUR
sudo chmod a+wr /opt/spotify\nsudo chmod a+wr /opt/spotify/Apps -R\n
"},{"location":"Tools/spicetify/#links","title":"Links","text":"With BFG large or troublesome files can be removed from a Git Repository
The Git repo should be cloned with --mirror
Delete a file in a Git repository and force push the new commit history.
bfg --delete-files file.md\ngit reflog expire --expire=now --all && git gc --prune=now --aggressive\ngit push --force\n
"},{"location":"Tools/Git/bfg-repo-cleaner/#secrest","title":"Secrest","text":"A file with a list of secrets can be used to remove all occurrences in the git repository
leaked-passwords.txt
PASSWORD1 # Replace literal string 'PASSWORD1' with '***REMOVED***' (default)\nPASSWORD2==>examplePass # replace with 'examplePass' instead\nPASSWORD3==> # replace with the empty string\nregex:password=\\w+==>password= # Replace, using a regex\nregex:\\r(\\n)==>$1 # Replace Windows newlines with Unix newlines\n
bfg --replace-text leaked-passwords.txt\n
git reflog expire --expire=now --all && git gc --prune=now --aggressive\n
"},{"location":"Tools/Git/git-crypt/","title":"Git Crypt","text":""},{"location":"Tools/Git/git-crypt/#how-to","title":"How to","text":""},{"location":"Tools/Git/git-crypt/#init","title":"Init","text":"Initialize repository with git-crypt init
Create a .gitattributes
file
touch .gitattributes\n
The .gitattributes
file contains lines in the following form:
[file pattern] attr1=value1 attr2=value2\n
"},{"location":"Tools/Git/git-crypt/#example","title":"Example","text":"If we want to encrypt the file config.yml
, the .gitattributes
should contain the following:
config.yml filter=git-crypt diff=git-crypt\n
With git-crypt status
we can see that our file will be encrypted on push to our remote repository.
\u276f git-crypt status | grep \"config.yml\"\n encrypted: config.yml\n
"},{"location":"Tools/Git/git-crypt/#locking","title":"Locking","text":"With git-crypt lock
and git-crypt unlock
the repository can be unlocked at will.
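For example (the key file path is an assumption; git-crypt unlock without arguments uses your GPG key):
# encrypt the working copy again\ngit-crypt lock\n\n# decrypt with your GPG key\ngit-crypt unlock\n\n# or decrypt with an exported symmetric key\ngit-crypt unlock /path/to/keyfile\n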
git-crypt add-gpg-user KEYID
docker run -v \"$PWD\":/path ghcr.io/zricethezav/gitleaks:v8.8.12 detect -f json -r \"/path/report-secrets.json\" --source=\"/path\"\n
Extract unique secrets to extracted-secrets
cat report-secrets.json | jq -n -r 'inputs[].Secret' | sort -u > extracted-secrets\n
"},{"location":"Tools/Git/gitleak/#clear-secrets-from-repository","title":"Clear secrets from repository","text":"Use (bfg)[../bfg-repo-cleaner.md]
Prepare with:
bfg --replace-text extracted-secrets\n
Clean secrets with:
git reflog expire --expire=now --all && git gc --prune=now --aggressive\n
"},{"location":"Tools/Git/GitLab/access-tokens/","title":"GitLab Access Tokens","text":""},{"location":"Tools/Git/GitLab/access-tokens/#clone","title":"Clone","text":"Clone with an access token git clone https://$project_name:$token@$gitlab/$project_path.git
Use in init.vim
let g:coc_global_extensions = [\n \\ 'coc-pyright',\n \\ 'coc-prettier',\n \\ 'coc-git',\n \\ 'coc-json',\n \\ 'coc-docker',\n \\ 'coc-yaml',\n \\ 'coc-html',\n \\ 'coc-sh',\n \\ 'coc-go',\n \\ '@yaegassy/coc-ansible',\n \\ ]\n
"},{"location":"Tools/PlantUML/PlantUML%20Themes/","title":"PlantUML Themes","text":"An overview of all available themes can be seen in the Theme Gallery.
"},{"location":"Tools/PlantUML/PlantUML%20Themes/#usage","title":"Usage","text":"Set a theme
Get all themes with help themes
g g \" + y G
%y+
echo \"deb http://download.proxmox.com/debian/pve bookworm pve-no-subscription\" > /etc/apt/sources.list.d/pve-community.list\n
"},{"location":"Virtualization/ProxMox/proxmox-8-apt-updates/#comment-out-the-enterprise-repository-at-etcaptsourceslistdpve-enterpriselist","title":"Comment out the enterprise repository at /etc/apt/sources.list.d/pve-enterprise.list","text":"sed -i 's/^deb/#deb/' /etc/apt/sources.list.d/pve-enterprise.list\n
"},{"location":"Virtualization/ProxMox/proxmox-8-apt-updates/#change-the-ceph-repository-at-etcaptsourceslistdcephlist","title":"Change the ceph repository at /etc/apt/sources.list.d/ceph.list","text":"sed -i 's/^deb/#deb/' /etc/apt/sources.list.d/ceph.list\necho \"deb http://download.proxmox.com/debian/ceph-quincy bookworm no-subscription\" >> /etc/apt/sources.list.d/ceph.list\n
"},{"location":"Virtualization/ProxMox/proxmox-cpu-consumption/","title":"Reduce CPU consumption on Proxmox","text":"cpufrequtils
via apt install cpufrequtils
apt update && apt install cpufrequtils\n
cpufreq-info -g
cpufreq-info -g\n
cpufreq-info -p\n
Set the governor to powersave
via cpufreq-set -g powersave
cpufreq-set -g powersave\n
echo 'GOVERNOR=\"powersave\"' | tee /etc/default/cpufrequtils\n
Powertop is a tool to diagnose issues with power consumption and power management. It can also be used to tune power management settings.
"},{"location":"Virtualization/ProxMox/proxmox-energy-consumption/#install-powertop","title":"Install powertop","text":"apt install powertop\n
"},{"location":"Virtualization/ProxMox/proxmox-energy-consumption/#run-powertop-calibration","title":"Run powertop calibration","text":"Calibration will toggle various functions on and off to determine the best settings for your system. So it is best to run this when the system is idle.
powertop --calibrate\n
"},{"location":"Virtualization/ProxMox/proxmox-energy-consumption/#run-powertop-to-see-recommendations","title":"Run powertop to see recommendations","text":"With you can switch between the different tabs.
powertop\n
"},{"location":"Virtualization/ProxMox/proxmox-energy-consumption/#auto-tune-power-management-settings-not-reboot-persistent","title":"Auto tune power management settings (not reboot persistent)","text":"powertop --auto-tune\n
"},{"location":"Virtualization/ProxMox/proxmox-energy-consumption/#systemd-service-to-auto-tune-power-management-settings-reboot-persistent","title":"Systemd service to auto tune power management settings (reboot persistent)","text":"cat << EOF > /etc/systemd/system/powertop.service\n[Unit]\nDescription=Powertop tunings\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nExecStart=/usr/sbin/powertop --auto-tune\n\n[Install]\nWantedBy=multi-user.target\nEOF\n\nsystemctl enable --now powertop.service\n
"},{"location":"Virtualization/ProxMox/proxmox-passtrough-hard-drive/","title":"Passtrough a hard drive from the Proxmox host to a VM","text":""},{"location":"Virtualization/ProxMox/proxmox-passtrough-hard-drive/#find-the-hard-drive-copy-the-uuid","title":"Find the hard drive & copy the UUID","text":"lsblk -o NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT,MODEL\n
"},{"location":"Virtualization/ProxMox/proxmox-passtrough-hard-drive/#find-the-vm-id","title":"Find the vm id","text":"qm list\n
"},{"location":"Virtualization/ProxMox/proxmox-passtrough-hard-drive/#passtrough-the-hard-drive-as-scsi","title":"Passtrough the hard drive as scsi","text":"qm set $vm_id -scsi2 /dev/disk/by-uuid/$disk_uuid\n
"},{"location":"Virtualization/ProxMox/proxmox-passtrough-hard-drive/#restart-the-vm","title":"Restart the vm","text":"qm reboot $vm_id\n
"},{"location":"Virtualization/ProxMox/proxmox-passtrough-hard-drive/#in-case-it-should-be-removed","title":"In case it should be removed","text":"qm unlink $vm_id --idlist scsi2\n
"}]}
\ No newline at end of file
+{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Introduction","text":"Notes I made on various topics.
The wiki is built with MkDocs and GitHub Pages. It supports inline PlantUML diagrams.
Inspired by wiki.nikitavoloboev.xyz & The Blue Book.
"},{"location":"#using-the-wiki","title":"Using the wiki","text":"You can quickly search the contents of this wiki above or you can explore the tree view to the left.
"},{"location":"Applications/wallabag/","title":"Wallabag","text":""},{"location":"Applications/wallabag/#run-a-console-command-in-the-container","title":"Run a console command in the container","text":"docker exec -it <containerName> /var/www/wallabag/bin/console <command> --env=prod\n
"},{"location":"Applications/wallabag/#list-commands","title":"List commands","text":"docker exec -it wallabag /var/www/wallabag/bin/console list --env=prod\n
"},{"location":"Applications/wallabag/#get-help-for-a-command","title":"Get help for a command","text":"docker exec -it wallabag /var/www/wallabag/bin/console help <command> --env=prod\n
"},{"location":"Applications/wallabag/#initialize-the-database","title":"Initialize the database","text":"docker exec -it wallabag /var/www/wallabag/bin/console wallabag:install --env=prod --no-interaction\n
"},{"location":"Applications/wallabag/#migrate-database","title":"Migrate database","text":"docker exec -it wallabag /var/www/wallabag/bin/console doctrine:migrations:migrate --env=prod --no-interaction\n
"},{"location":"Applications/wallabag/#list-users","title":"List users","text":"docker exec -it wallabag /var/www/wallabag/bin/console wallabag:user:list --env=prod\n
"},{"location":"Applications/wallabag/#create-a-new-user","title":"Create a new user","text":"docker exec -it wallabag /var/www/wallabag/bin/console fos:user:create --env=prod\n
"},{"location":"Applications/wallabag/#make-a-user-super-admin","title":"Make a user super admin","text":"docker exec -it wallabag /var/www/wallabag/bin/console fos:user:promote <user> --super --env=prod\n
"},{"location":"Applications/wallabag/#demote-and-deactivate-the-initial-wallabag-user","title":"Demote and deactivate the initial wallabag user","text":"docker exec -it wallabag /var/www/wallabag/bin/console fos:user:demote wallabag --super --env=prod\ndocker exec -it wallabag /var/www/wallabag/bin/console fos:user:deactivate wallabag --env=prod\n
"},{"location":"Applications/PiHole/add-unbound/","title":"Add Unbound as a recursive DNS Server to the PiHole setup","text":""},{"location":"Applications/PiHole/add-unbound/#why-would-you-want-this","title":"Why would you want this?","text":"sudo apt install unbound\n
"},{"location":"Applications/PiHole/add-unbound/#configuration","title":"Configuration","text":"Configure unbound with:
cat << EOF > /etc/unbound/unbound.conf.d/pi-hole.conf\nserver:\n # If no logfile is specified, syslog is used\n # logfile: \"/var/log/unbound/unbound.log\"\n verbosity: 0\n\n # view more statistics\n extended-statistics: yes\n\n interface: 127.0.0.1\n port: 5335\n do-ip4: yes\n do-udp: yes\n do-tcp: yes\n\n # May be set to yes if you have IPv6 connectivity\n do-ip6: yes\n\n # You want to leave this to no unless you have *native* IPv6. With 6to4 and\n # Terredo tunnels your web browser should favor IPv4 for the same reasons\n prefer-ip6: no\n\n # Use this only when you downloaded the list of primary root servers!\n # If you use the default dns-root-data package, unbound will find it automatically\n #root-hints: \"/var/lib/unbound/root.hints\"\n\n # Trust glue only if it is within the server's authority\n harden-glue: yes\n\n # Require DNSSEC data for trust-anchored zones, if such data is absent, the zone becomes BOGUS\n harden-dnssec-stripped: yes\n\n # Don't use Capitalization randomization as it known to cause DNSSEC issues sometimes\n # see https://discourse.pi-hole.net/t/unbound-stubby-or-dnscrypt-proxy/9378 for further details\n use-caps-for-id: no\n\n # Reduce EDNS reassembly buffer size.\n # IP fragmentation is unreliable on the Internet today, and can cause\n # transmission failures when large DNS messages are sent via UDP. Even\n # when fragmentation does work, it may not be secure; it is theoretically\n # possible to spoof parts of a fragmented DNS message, without easy\n # detection at the receiving end. Recently, there was an excellent study\n # >>> Defragmenting DNS - Determining the optimal maximum UDP response size for DNS <<<\n # by Axel Koolhaas, and Tjeerd Slokker (https://indico.dns-oarc.net/event/36/contributions/776/)\n # in collaboration with NLnet Labs explored DNS using real world data from the\n # the RIPE Atlas probes and the researchers suggested different values for\n # IPv4 and IPv6 and in different scenarios. They advise that servers should\n # be configured to limit DNS messages sent over UDP to a size that will not\n # trigger fragmentation on typical network links. DNS servers can switch\n # from UDP to TCP when a DNS response is too big to fit in this limited\n # buffer size. This value has also been suggested in DNS Flag Day 2020.\n edns-buffer-size: 1232\n\n # Perform prefetching of close to expired message cache entries\n # This only applies to domains that have been frequently queried\n prefetch: yes\n\n # One thread should be sufficient, can be increased on beefy machines. In reality for most users running on small networks or on a single machine, it should be unnecessary to seek performance enhancement by increasing num-threads above 1.\n num-threads: 1\n\n # Ensure kernel buffer is large enough to not lose messages in traffic spikes\n so-rcvbuf: 1m\n\n # Ensure privacy of local IP ranges\n # Needs to be commented out if you have a public dns records (e.g. Cloudflare) resolving to\n # your local IP. Those records will otherwise be unresolvable.\n private-address: 192.168.0.0/16\n private-address: 169.254.0.0/16\n private-address: 172.16.0.0/12\n private-address: 10.0.0.0/8\n private-address: fd00::/8\n private-address: fe80::/10\nEOF\n
Signal PiHole to use this limit
cat << EOF > /etc/dnsmasq.d/99-edns.conf\nedns-packet-max=1232\nEOF\n
"},{"location":"Applications/PiHole/add-unbound/#restart-unbound","title":"Restart unbound","text":"sudo systemctl restart unbound\n
"},{"location":"Applications/PiHole/add-unbound/#test-unbound","title":"Test unbound","text":""},{"location":"Applications/PiHole/add-unbound/#query","title":"Query","text":"dig google.com @127.0.0.1 -p 5335\n
"},{"location":"Applications/PiHole/add-unbound/#dnssec","title":"DNSSec","text":"Get Servfail
dig sigfail.verteiltesysteme.net @127.0.0.1 -p 5335\n
This query should return NOERROR:
dig sigok.verteiltesysteme.net @127.0.0.1 -p 5335\n
"},{"location":"Applications/PiHole/add-unbound/#configure-pihole","title":"Configure PiHole","text":"Now we need to tell PiHole to use unbound as an upstream DNS server.
This is done by editing /etc/pihole/setupVars.conf
and adding/replacing the following lines:
PIHOLE_DNS_1=127.0.0.1#5335\nPIHOLE_DNS_2=127.0.0.1#5335\n
Restart PiHole
systemctl restart pihole-FTL.service\n
The PiHole web interface should now show under /admin/settings.php?tab=dns
that the upstream DNS server is 127.0.0.1#5335
.
Under /admin/queries.php
you should see that the queries are now forwarded to 127.0.0.1#5335
.
If that is not the case, you may need to save the settings manually in the web interface under /admin/settings.php?tab=dns
.
We want two PiHole instances that share the same IP address. If one instance fails, the other will take over the IP address.
They will also share the same gravity database, so you only have to update it on one of the instances.
"},{"location":"Applications/PiHole/ha-setup/#why-do-we-want-this","title":"Why do we want this?","text":"If you have a PiHole instance running on a Raspberry Pi and it fails you will have to wait until you can fix it. This means manually changing the dns server on all your devices or trying to change the dhcp server to point to a different dns server. With this setup you will have a backup PiHole instance that will take over the ip address of the primary instance when it fails.
"},{"location":"Applications/PiHole/ha-setup/#requirements","title":"Requirements","text":"pihole -v
, update with pihole -up
pihole -r
to get them to work properlysudo apt install keepalived\n
"},{"location":"Applications/PiHole/ha-setup/#configure-keepalived","title":"Configure keepalived","text":"Script to check if local instance is running
cat << EOF > /usr/local/bin/check-local-pihole\n#!/bin/sh\n\nRUNNING=$(ps -aux | grep pihole-FTL | grep -v grep)\nexit $?\nEOF\n\nchmod +x /usr/local/bin/check-local-pihole\n
"},{"location":"Applications/PiHole/ha-setup/#configure-keepalived-on-the-primary-instance","title":"Configure keepalived on the primary instance","text":"cat << EOF > /etc/keepalived/keepalived.conf\nvrrp_script chk_local_pihole {\n script \"/usr/local/bin/check-local-pihole\" # (1)!\n interval 5\n weight -100\n}\n\nglobal_defs {\n router_id pihole-01 # (2)!\n script_user root\n enable_script_security\n}\n\nvrrp_instance PIHOLE {\n state MASTER # (3)!\n interface eth0 # (4)!\n virtual_router_id 20 # (5)!\n priority 150\n advert_int 1\n unicast_src_ip 192.168.3.21 # (6)!\n unicast_peer {\n 192.168.3.22 # (7)!\n }\n\n authentication {\n auth_type PASS\n auth_pass piholedns # (8)!\n }\n\n virtual_ipaddress {\n 192.168.3.20/23 # (9)!\n }\n\n track_script {\n chk_local_pihole\n }\n}\nEOF\n
cat << EOF > /etc/keepalived/keepalived.conf\nvrrp_script chk_pihole {\n script \"/usr/local/bin/check-local-pihole\"\n interval 1\n weight -100\n}\n\nglobal_defs {\n router_id pihole-02\n script_user root\n enable_script_security\n}\n\nvrrp_instance PIHOLE {\n state BACKUP\n interface eth0\n virtual_router_id 20\n priority 140\n advert_int 1\n unicast_src_ip 192.168.3.22\n unicast_peer {\n 192.168.3.21\n }\n\n authentication {\n auth_type PASS\n auth_pass piholedns\n }\n\n virtual_ipaddress {\n 192.168.3.20/23\n }\n\n track_script {\n chk_local_pihole\n }\n}\nEOF\n
"},{"location":"Applications/PiHole/ha-setup/#start-keepalived","title":"Start keepalived","text":"Run on both instances
systemctl enable --now keepalived.service\n
"},{"location":"Applications/PiHole/ha-setup/#test-keepalived","title":"Test keepalived","text":"ip a
on both instances or looking at the pihole dashboard in the top right cornerapt update && apt install sqlite3 sudo git cron rsync ssh\n
"},{"location":"Applications/PiHole/ha-setup/#install-gravity-sync-script","title":"Install gravity sync script","text":"We will use gravity-sync to sync the gravity database between the two instances.
Install gravity-sync on both instances and follow the instructions.
curl -sSL https://raw.githubusercontent.com/vmstan/gs-install/main/gs-install.sh | bash\n
You can always reset the configuration with gravity-sync config
Run the following command on the primary instance to push the gravity database to the secondary instance.
gravity-sync push\n
"},{"location":"Applications/PiHole/ha-setup/#automate-gravity-database-sync","title":"Automate gravity database sync","text":"Run the following command on both instances to create a systemd timer that will sync the gravity database every 5 minutes.
gravity-sync automate\n
You can check the status of the timer with systemctl status gravity-sync.timer
. And you can check the logs with journalctl -u gravity-sync.service
.
With gravity-sync automate hour
the timer will sync the gravity database every hour.
sudo apt update -y\nsudo apt install golang\n
Clone, compile & move the exporter to the correct location
git clone https://github.com/letsencrypt/unbound_exporter.git\ncd unbound_exporter\ngo build\nsudo install -o root -g root -m 0755 unbound_exporter /usr/local/bin/unbound-exporter\ncd ..\nrm -rf unbound_exporter\n
"},{"location":"Applications/PiHole/install-unbound-prometheus-exporter/#create-a-systemd-service","title":"Create a systemd service","text":"cat << EOF > /etc/systemd/system/unbound-exporter.service\n[Unit]\nDescription=Unbound Prometheus Exporter\nAfter=network.target\n\n[Service]\nType=simple\nUser=root\nGroup=root\nRestart=always\nExecStart=/usr/local/bin/unbound-exporter -web.listen-address \":9167\" -web.telemetry-path \"/metrics\"\n\n[Install]\nWantedBy=multi-user.target\nEOF\n
"},{"location":"Applications/PiHole/install-unbound-prometheus-exporter/#start-the-service","title":"Start the service","text":"sudo systemctl daemon-reload\nsudo systemctl enable --now unbound-exporter.service\n
"},{"location":"Applications/PiHole/install-unbound-prometheus-exporter/#test-the-exporter","title":"Test the exporter","text":"curl localhost:9167/metrics\n
"},{"location":"Blog/Misc/blog-gh-pages-mkdocs/","title":"How to create a blog with GitHub Pages and MkDocs","text":""},{"location":"Blog/Misc/blog-gh-pages-mkdocs/#dockerfile","title":"Dockerfile","text":"Create the Containerfile at Dockerfile
or Containerfile
.
FROM docker.io/ubuntu:focal\n\nRUN : \\\n && apt-get update -y \\\n && apt-get install -y --no-install-recommends \\\n python3 \\\n python3-venv \\\n python3-pip \\\n && rm -rf /var/lib/api/lists*\n\nWORKDIR /src\n\nCOPY requirements.txt .\nENV PATH = /venv/bin:$PATH\n\nRUN : \\\n && python3 -m venv /venv \\\n && python3 -m pip --no-cache-dir install -r requirements.txt\n\nCOPY . .\n\nWORKDIR /src/blog\n
"},{"location":"Blog/Misc/blog-gh-pages-mkdocs/#taskfile","title":"Taskfile","text":"To store some reoccuring tasks we use a Taskfile. To install Task use this link or just use sudo sh -c \"$(curl --location https://taskfile.dev/install.sh)\" -- -d -b /usr/local/bin
Create the Taskfile.yml
.
# https://taskfile.dev\n\nversion: \"3\"\n\nvars:\n CONTAINER_NAME: blog.rwxd.eu\n CURRENT_DIR:\n sh: pwd\n SITE_DIR: \"{{.CURRENT_DIR}}/docs/site\"\n\ntasks:\n default:\n cmds:\n - task -l\n silent: true\n\n setup:\n desc: Setup requirements\n cmds:\n - python3 -m pip install -r requirements.txt -q\n - pre-commit install\n silent: true\n\n image:\n desc: builds container image with name blog.rwxd.eu\n cmds:\n - podman build -t {{.CONTAINER_NAME}} -f ./Containerfile\n silent: true\n\n serve:\n desc: Serve blog with a container\n vars:\n PORT: 8000\n MOUNT: \"{{.CURRENT_DIR}}/src\"\n cmds:\n - task: image\n - podman run --rm -p {{.PORT}}:8000 -v ./:/src {{.CONTAINER_NAME}} mkdocs serve\n silent: true\n\n serve-local:\n desc: Serve blog local\n dir: ./blog\n cmds:\n - mkdocs serve\n silent: true\n\n build:\n desc: Build blog pages\n cmds:\n - task: image\n - mkdir -p {{.SITE_DIR}}\n - podman run --rm -v {{.SITE_DIR}}:/src/blog/site {{.CONTAINER_NAME}} sh -c \"mkdocs build\"\n
"},{"location":"Blog/Misc/pluralsight_trial/","title":"Pluralsight demo / trial","text":"Create a new Pluralsight Account with a one month demo trough Visual Studio Dev Essentials.
sudo apt install sane sane-utils sanebd\n
"},{"location":"Blog/Misc/sane-scanbd-canon-5600f/#configuration","title":"Configuration","text":"Copy sane configuration to scanbd.
cp -r /etc/sane.d/* /etc/scanbd/sane.d/\n
Modify /etc/sane.d/dll.conf
so that only net
is uncommented in the configuration.
# genesys\nnet\n# canon\n
Test if the scanner is detected
SANE_CONFIG_DIR=/etc/scanbd scanimage -A\n
root@scanner:/opt/insaned# SANE_CONFIG_DIR=/etc/scanbd scanimage -L\ndevice 'genesys:libusb:001:004' is a Canon CanoScan 5600F flatbed scanner\n
"},{"location":"Blog/Misc/sane-scanbd-canon-5600f/#start-enable-the-service","title":"Start & enable the service","text":"sudo systemctl start scanbd\nsudo systemctl enable scanbd\n
"},{"location":"Blog/Misc/sane-scanbd-canon-5600f/#edit-the-button-configuration","title":"Edit the button configuration","text":"/etc/scanbd/scanbd.conf
The scan
action runs the test.script
per default. The path of the script or the content can be changed.
action scan {\n filter = \"^scan.*\"\n numerical-trigger {\n from-value = 1\n to-value = 0\n }\n desc = \"Scan to file\"\n script = \"/usr/local/bin/scan-to-share\"\n }\n
At the bottom
# devices\n# each device can have actions and functions, you can disable not relevant devices\ninclude(scanner.d/canon.conf)\n
"},{"location":"Blog/Misc/sane-scanbd-canon-5600f/#debugging","title":"Debugging","text":"systemctl stop scanbd\nSANE_CONFIG_DIR=/etc/scanbd scanbd -f\n
More verbose:
systemctl stop scanbd\nSANE_CONFIG_DIR=/etc/scanbd scanbd -f -d7\n
"},{"location":"Blog/Misc/sane-scanbd-canon-5600f/#scan-script","title":"Scan script","text":"#!/usr/bin/env bash\nset -x -e -o pipefail\n\nlog_file=\"/var/scans/scan.log\"\necho \"Starting script\" >> \"$log_file\"\n\nresolution=300\nfile_ending=jpg\nformat=jpeg\nmode=color\n\nfile_data=$(date +'%Y_%m_%d-%H_%M_%S')\nfilename=\"$file_data.$file_ending\"\ntemp_path=\"/tmp/$filename\"\ndest_path=\"/var/scans/scanned/$file_data.pdf\"\n\necho \"Destination path \\\"$dest_path\\\"\" >> \"$log_file\"\necho \"Starting scan with resolution $resolution, format $format & mode $mode\" >> \"$log_file\"\n\nexport SANE_CONFIG_DIR=/etc/scanbd\nscanimage --format \"$format\" --resolution=\"$resolution\" --mode \"$mode\" -v -p > \"$temp_path\"\nimg2pdf \"$temp_path\" -o \"$dest_path\"\nrm \"$temp_path\"\nchmod 777 \"$dest_path\"\n
"},{"location":"DevOps/Continuous-Integration/Ansible/ansible-runner/","title":"ansible-runner","text":""},{"location":"DevOps/Continuous-Integration/Ansible/ansible-runner/#usage","title":"Usage","text":"Run with docker as process isolation
ansible-runner run demo -m debug --hosts localhost -a msg=hello --container-image quay.io/ansible/awx-ee -vvvv --process-isolation --process-isolation-executable=docker
The molecule project helps to develop and test Ansible roles.
python3 -m pip install molecule
Generate a new role molecule init role <name>
Init in existing role molecule init scenario
List drivers molecule drivers
- name: Wait for port 22\n wait_for:\n host: \"{{ ansible_host }}\"\n port: 22\n state: started\n delay: 10\n sleep: 1\n connect_timeout: 5\n timeout: 900\n delegate_to: 127.0.0.1\n
"},{"location":"DevOps/Continuous-Integration/GitLab-CICD/clear_artifacts/","title":"Script to clear GitLab CI/CD Artifacts","text":"import requests\nimport json\n\nclass BearerAuth(requests.auth.AuthBase):\n def __init__(self, token):\n self.token = token\n def __call__(self, r):\n r.headers[\"authorization\"] = \"Bearer \" + self.token\n return r\n\nproject = '804'\ntoken='ijuiosjdiof'\n\nfor page in range(1, 200):\n url = f'https://gitlab.com/api/v4/projects/{project}/jobs?per_page=100&page={page}'\n print(f'Getting jobs from {url}')\n response = requests.get(url, auth=BearerAuth(token))\n\n data= json.loads(response.text)\n\n for item in data:\n url=f'https://gitlab.com/api/v4/projects/{project}/jobs/{item[\"id\"]}/artifacts'\n print(f'Running on {url}')\n response = requests.delete(url, auth=BearerAuth(token))\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-commands/","title":"Docker commands","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-commands/#stop-things","title":"Stop things","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-commands/#stop-all-containers","title":"Stop all containers","text":"docker stop $(docker ps -aq)\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-commands/#remove-things","title":"Remove things","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-commands/#remove-all-containers","title":"Remove all containers","text":"docker rm $(docker ps -aq)\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-commands/#remove-stop-all-containers","title":"Remove & stop all containers","text":"docker rm -f $(docker ps -aq)\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-commands/#all-images","title":"All Images","text":"docker rmi $(docker images -q)\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-commands/#start-docker-daemon-in-debug-mode","title":"Start docker daemon in debug mode","text":"sudo dockerd --debug\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-ipv6/","title":"Docker with IPv6","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/docker-ipv6/#setup","title":"Setup","text":"Test with a busybox container docker run --rm -it busybox sh
```Dockerfile
"},{"location":"DevOps/Infrastructure-Solutions/Container/Docker/ssh-keys/#install-pip-requirements","title":"install pip requirements","text":"SHELL [\"/bin/bash\", \"-o\", \"pipefail\", \"-c\"] RUN : \\ && eval \"$(ssh-agent -s)\"\\ && mkdir -p /root/.ssh \\ && chmod 0700 /root/.ssh \\ && echo ${GITLAB_SSH_PRIVATE_KEY} | base64 -d >> /root/.ssh/id_rsa \\ && chmod 0700 /root/.ssh/id_rsa \\ && ssh-add /root/.ssh/id_rsa \\ && ssh-keyscan gitlab.com >> /root/.ssh/known_hosts \\ && chmod 0644 /root/.ssh/known_hosts \\ && python3 -m venv /venv \\ && python3 -m pip install --no-cache-dir -r requirements.txt \\ && rm -f /root/.ssh/id_rsa
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/ckad/","title":"CKAD - Certified Kubernetes Application Developer","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/ckad/#books","title":"Books","text":"curl -sfL https://get.k3s.io | sh -s - --write-kubeconfig-mode 644\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/K3S/Install%20K3S%20on%20Raspberry%20PIs/#without-traefik","title":"Without Traefik","text":"curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC=\"--no-deploy traefik\" sh -s - --write-kubeconfig-mode 644\n
Get a token for the worker nodes
sudo cat /var/lib/rancher/k3s/server/token\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/K3S/Install%20K3S%20on%20Raspberry%20PIs/#worker-node","title":"Worker Node","text":"Set the K3S Token
export K3S_TOKEN=blablabla\n
curl -sfL https://get.k3s.io | K3S_URL=https://manager01.fritz.box:6443 K3S_TOKEN=$K3S_TOKEN sh -\n
Generate kubeconfig
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/K3S/Install%20K3S%20on%20Raspberry%20PIs/#create-a-service-account-for-kubectl","title":"Create a Service Account for kubectl","text":"kubectl -n default apply -f - <<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: home-computer\nEOF\n\nkubectl -n default apply -f - <<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n name: gitlab-service-account-role-binding\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: cluster-admin\nsubjects:\n - kind: ServiceAccount\n name: home-computer\n namespace: default\nEOF\n\nkubectl describe secret home-computer-token\n\nexport K8S_SERVER=\"https://192.168.2.31:6443\"\nexport K8S_CLUSTER=\"k3s-home\"\nexport K8S_USER=\"home-computer\"\nexport K8S_USER_TOKEN=\"blabla\" \n\nkubectl config set-cluster $K8S_CLUSTER --server=$K8S_SERVER --insecure-skip-tls-verify=true\nkubectl config set-credentials $K8S_USER --token=$K8S_USER_TOKEN\nkubectl config set-context $K8S_CLUSTER --cluster=$K8S_CLUSTER --user=$K8S_USER\nkubectl config use-context $K8S_CLUSTER\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/K3S/raspberry/","title":"K3s on Raspberry","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/K3S/raspberry/#errors","title":"Errors","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/K3S/raspberry/#failed-to-find-memory-cgroup-you-may-need-to-add","title":"Failed to find memory cgroup, you may need to add...","text":"Solution
sudo vim /boot/firmware/cmdline.txt\n
Add cgroup_enable=cpuset cgroup_enable=memory cgroup_memory=1
into end of the file.
The hybrid mode works for the control plane and kubernetes service https://kube-vip.io/hybrid/
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/Networking/metallb/","title":"MetalLB","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/Networking/metallb/#install-with-kubectl","title":"Install with kubectl","text":"https://metallb.universe.tf/installation/
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/namespace.yaml kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.10.2/manifests/metallb.yaml\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/Networking/metallb/#config","title":"config","text":"apiVersion: v1\nkind: ConfigMap\nmetadata:\n namespace: metallb-system\n name: config\ndata:\n config: |\n address-pools:\n - name: default\n protocol: layer2\n addresses:\n - 192.168.3.200-192.168.3.250\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/Networking/metallb/#install-with-terraform","title":"Install with Terraform","text":"resource \"kubernetes_namespace\" \"metallb\" {\n metadata {\n name = \"metallb\"\n }\n}\n\nresource \"helm_release\" \"metallb\" {\n name = \"metallb\"\n repository = \"https://metallb.github.io/metallb\"\n chart = \"metallb\"\n namespace = \"metallb\"\n\n depends_on = [kubernetes_namespace.metallb]\n\n set {\n name = \"configInline.address-pools[0].name\"\n value = \"default\"\n type = \"string\"\n }\n\n set {\n name = \"configInline.address-pools[0].protocol\"\n value = \"layer2\"\n type = \"string\"\n }\n\n set {\n name = \"configInline.address-pools[0].addresses[0]\"\n value = \"192.168.3.200-192.168.3.250\"\n type = \"string\"\n }\n}\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/Tanzu-Kubernetes-Cluster/kubectl-vsphere/","title":"Kubectl vSphere","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/Tanzu-Kubernetes-Cluster/kubectl-vsphere/#usage","title":"Usage","text":"Login to a cluster ``
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/Tanzu-Kubernetes-Cluster/kubectl-vsphere/#links","title":"Links","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/kubectl/CronJob/","title":"Test CronJob","text":"kubectl create job --from=cronjob/<name> <new-pod-name>
kubectl create job --from=cronjob/check-job test-job-1
kubectl run -i --tty --rm debug --image=busybox --restart=Never -- sh
NAMESPACE=test && kubectl get pods -n $NAMESPACE | grep Error | cut -d' ' -f 1 | xargs kubectl delete pod -n $NAMESPACE\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/kubectl/label/","title":"nodes","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/kubectl/label/#add-node-label","title":"add node label","text":"kubectl label node node01 node-role.kubernetes.io/name
kubectl label node node01 node-role.kubernetes.io/name-
gitlab-service-account.yml with ClusterRoleBinding
---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: gitlab-service-account\n\n---\napiVersion: rbac.authorization.k8s.io/v1beta1\nkind: ClusterRoleBinding\nmetadata:\n name: gitlab-service-account-role-binding\nroleRef:\n apiGroup: rbac.authorization.k8s.io\n kind: ClusterRole\n name: cluster-admin\nsubjects:\n - kind: ServiceAccount\n name: gitlab-service-account\n namespace: default\n
gitlab-service-account.yml with RoleBinding
---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: gitlab-service-account\n namespace: <KUBE_NAMESPACE>\n\n---\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: cicd-role\n namespace: <KUBE_NAMESPACE>\nrules:\n- apiGroups:\n - \"\"\n - apps\n - extensions\n resources:\n - '*'\n verbs:\n - '*'\n\n---\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1beta1\nmetadata:\n name: cicd-role\n namespace: <KUBE_NAMESPACE>\nsubjects:\n - kind: ServiceAccount\n name: gitlab-service-account\nroleRef:\n kind: Role\n name: cicd-role\n apiGroup: rbac.authorization.k8s.io\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Kubernetes/kubectl/serviceaccount/#get-the-created-token","title":"Get the created token","text":"kubectl -n <KUBE_NAMESPACE> describe secret gitlab-service-account-token-\n
export K8S_SERVER=\"https://10.24.1.1:6443\"\nexport K8S_CLUSTER=\"gitlab-test\"\nexport K8S_USER=\"gitlab-service-account\"\nexport K8S_USER_TOKEN=\"\"\n\nkubectl config set-cluster $K8S_CLUSTER --server=$K8S_SERVER --insecure-skip-tls-verify=true\nkubectl config set-credentials $K8S_USER --token=$K8S_USER_TOKEN\nkubectl config set-context $K8S_CLUSTER --cluster=$K8S_CLUSTER --user=$K8S_USER\nkubectl config use-context $K8S_CLUSTER\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/migrate-compose-to-kubefiles/","title":"Migrate from podman-compose to Kubefiles","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/migrate-compose-to-kubefiles/#overview","title":"Overview","text":"Kubefiles are a way to define a podman pod and containers in a single file. They are similar to docker-compose files, but can also be used with Kubernetes.
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/migrate-compose-to-kubefiles/#requirements","title":"Requirements","text":"The podman-compose or docker-compose file must be started with podman-compose up -d
and the created podman pod should be listed with podman pod ls
.
Get the pod name via podman pod ls
and generate the Kubefile with:
podman kube generate <pod_name> -f pod.kube.yaml\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/migrate-compose-to-kubefiles/#persistent-volume-claim","title":"Persistent Volume Claim","text":"Get the volume name via podman volume ls
and generate the Kubefile with:
podman kube generate <volume_name> -f pvc.kube.yaml\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-compose-systemd-files/","title":"Use systemd files with rootless podman-compose","text":"Currently (as of 6/15/2023), podman-compose must be manually installed to use version 1.0.7 (check with podman-compose -v), because pods are not used by default.
pip3 install git+https://github.com/containers/podman-compose.git\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-compose-systemd-files/#setup","title":"Setup","text":"Add the rootless podman user to the systemd-journal group to watch logs.
usermod -aG systemd-journal podman\n
Create the systemd podman-compose unit with root permissions
sudo podman-compose systemd --action create-unit\nsudo systemctl daemon-reload\n
Change to the directory where your podman-compose file resides.
Register the project
podman-compose systemd --action register\n\n# or with a different file name than podman-compose.yaml\npodman-compose -f docker-compose.yaml systemd --action register\n
Enable and start the systemd service
systemctl --user enable --now 'podman-compose@project-name'\n
Stop & Start
systemctl --user stop 'podman-compose@project-name'\nsystemctl --user start 'podman-compose@project-name'\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-compose-systemd-files/#troubleshooting","title":"Troubleshooting","text":"When the systemd unit is created you can use
podman pod ls\n\npodman pod inspect pod_project-name\n\nsystemctl --user status -l podman-compose@project-name\n\njournalctl --user -xu podman-compose@project-name\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-quadlets/","title":"Podman Quadlets","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-quadlets/#pre-requisites","title":"Pre-requisites","text":"When using rootless podman a directory under the user's home directory must be created for the quadlet files.
mkdir -p ~/.config/containers/systemd\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-quadlets/#container","title":"Container","text":"A container quadlet file must end with .container
in the ~/.config/containers/systemd
directory.
Example quadlet file to run a deluge container (deluge.container
file):
[Install]\nWantedBy=default.target\n\n[Unit]\nAfter=mullvadvpn.service\n\n[Container]\nImage=docker.io/linuxserver/deluge:latest\nVolume=/opt/container/deluge/downloads/:/downloads\nVolume=/opt/container/deluge/config/:/config\n\n[Service]\n# Always restart the container when it exits\nRestart=always\n# Extend Timeout to allow time to pull the image\nTimeoutStartSec=900\n
All the options for the quadlet file can be found in the podman documentation.
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-quadlets/#start","title":"Start","text":"systemctl --user daemon-reload\nsystemctl --user start deluge\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-quadlets/#logs","title":"Logs","text":"podman logs systemd-deluge\n\njournactl -f | grep deluge\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/podman-quadlets/#pods","title":"Pods","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/setup-rootless/","title":"Podman rootless setup","text":""},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/setup-rootless/#install-podman","title":"Install podman","text":"dnf install -y podman podman-docker\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/setup-rootless/#enable-low-ports","title":"Enable low ports","text":"if ! grep -q \"net.ipv4.ip_unprivileged_port_start=80\" /etc/sysctl.conf; then echo \"net.ipv4.ip_unprivileged_port_start=80\" >> /etc/sysctl.conf; fi\n\n# Reload sysctl\nsysctl --system\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/setup-rootless/#create-user","title":"Create user","text":"useradd -m -s /bin/bash container\nsudo -iu container\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/Podman/setup-rootless/#create-podman-socket","title":"Create podman socket","text":"if ! grep -q \"loginctl enable-linger\" ~/.bashrc; then echo \"loginctl enable-linger $(whoami)\" >> ~/.bashrc; fi\nif ! grep -q \"$temp\" ~/.bashrc; then echo \"XDG_RUNTIME_DIR=/run/user/$(id -u)\" >> ~/.bashrc; fi\nsource ~/.bashrc\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/additional/Traefik/Excluding/","title":"Excluding","text":"services:\n whoami:\n image: \"traefik/whoami\"\n container_name: \"whoami-test\"\n labels:\n - \"traefik.enable=true\"\n - \"traefik.http.routers.whoami-test.rule=Host(`whoami-test.fritz.box`)\"\n - \"traefik.http.routers.whoami-test.entrypoints=http\"\n - \"traefik.http.routers.whoami-test.middlewares=intern_whitelist\"\n - \"traefik.http.middlewares.intern_whitelist.ipwhitelist.sourcerange=192.168.2.0/23\"\n - \"traefik.http.middlewares.intern_whitelist.ipwhitelist.ipstrategy.excludedips=192.168.2.1, 192.168.2.124\"\n
"},{"location":"DevOps/Infrastructure-Solutions/Container/additional/Traefik/External%20Services/","title":"External Services","text":"http:\n routers:\n intern: {}\n entryPoints:\n - \"http\"\n - \"https\"\n rule: \"Host(`HostRegexp(`fritz.box`, `{subdomain:[a-z]+}.fritz.box`, ...)`)\"\n pihole:\n entryPoints:\n - \"http\"\n - \"https\"\n rule: \"Host(`pihole.fritz.box`)\"\n service: pihole\n middlewares:\n - addprefix-pihole\n services:\n pihole:\n loadBalancer:\n servers:\n - url: \"http://192.168.2.19:80\"\n passHostHeader: true\n middlewares:\n addprefix-pihole:\n addPrefix:\n prefix: \"/admin\"\n
"},{"location":"DevOps/Infrastructure-as-Code/ArgoCD/Setup%20Argo%20CD%20with%20a%20Helm%20Charts%20Repository/","title":"Install Argo CD","text":"Getting Started Guide
kubectl create namespace argocd \nkubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml\n
Download Argo CD CLI
Create a load balancer to use the API Server
kubectl patch svc argocd-server -n argocd -p '{\"spec\": {\"type\": \"LoadBalancer\"}}'\n
Get the initial admin password
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath=\"{.data.password}\" | base64 -d\n
"},{"location":"DevOps/Infrastructure-as-Code/ArgoCD/cli-cluster-login/","title":"Login with the ArgoCD Cli in the current cluster","text":""},{"location":"DevOps/Infrastructure-as-Code/ArgoCD/cli-cluster-login/#prerequisites","title":"Prerequisites","text":"# change the default namespace of your current context to argocd\nkubectl config set-context --current --namespace=argocd\n\nargocd login --core\n
Check for access to the API Server
argocd app list\n
"},{"location":"Math/Books/konkrete-mathematik-mit-python/","title":"Bearbeitung der Aufgaben von Konkrete Mathematik mit Python
","text":""},{"location":"Math/Books/konkrete-mathematik-mit-python/#15","title":"15","text":"def potenz(a: int, b: int) -> int:\n p = 1\n while b > 0:\n p *= a\n b -= 1\n return p\n\nassert potenz(2, 2) == 4\nassert potenz(3, 3) == 27\nassert potenz(3, 0) == 1\n
"},{"location":"Math/Books/konkrete-mathematik-mit-python/#17","title":"17","text":""},{"location":"Math/Discrete%20Math/Sets/","title":"Set-builder notation","text":"The set of all \\(x\\) in \\(S\\) such that \\(P(x)\\) is true -> \\(\\{ x \\in S | P(x) \\}\\)
"},{"location":"Misc/jwt-analyzing/","title":"Show content of a JWT token","text":"jwt=\"ey....\"\njq -R 'split(\".\") | .[1] | @base64d | fromjson' <<< \"$jwt\"\n
"},{"location":"Misc/openvpn-container-iptables-error/","title":"Problems with iptables in a OpenVPN container; leaking the real IP","text":"I was using the container \"dperson/openvpn-client:latest\" in combination with a deluge container. Which has the --net=container:vpn
option to use the same network stack as the vpn container.
By using the website https://ipleak.net/ I noticed that my real IP was leaking while testing the torrent client. The torrent client was listed with both the VPN IP and the real IP. Using curl ipinfo.io
showed only the VPN IP.
The host is an AlmaLinux 9.2.
The container logs contained the following lines:
> docker logs vpn | grep \"ip\\dtables\"\ntables v1.8.4 (legacy): can't initialize iptables table `filter': Table does not exist (do you need to insmod?)\nPerhaps iptables or your kernel needs to be upgraded.\n
IP tables version on the host and in the container:
> iptables -V\niptables v1.8.8 (nf_tables)\n\n> docker exec vpn iptables -V\niptables v1.8.4 (legacy)\n
So the container was using the legacy iptables version. Also visible in the Dockerfile.
The nftables_nat modules are loaded, but the legacy iptables_nat modules are not.
> lsmod | grep nf_nat\nnf_nat 57344 3 xt_nat,nft_chain_nat,xt_MASQUERADE\n\n> lsmod | grep \"^ip\\w*table_nat\"\n
So we can load the legacy modules with modprobe.
> modprobe iptable_nat\n> modprobe ip6table_nat\n
> lsmod | grep \"^ip\\w*table_nat\"\n
Now the legacy modules are loaded and the error message is gone.
> docker restart vpn\n
Make the modules persistent.
touch /etc/modules-load.d/iptables_nat.conf\nprintf \"iptable_nat\\nip6table_nat\\n\" > /etc/modules-load.d/iptables_nat.conf\n
This solution also works when podman is used instead of docker.
"},{"location":"Networking/Cumulus%20Linux/about-cumulus-linux/","title":"About Cumulus Linux","text":""},{"location":"Networking/Misc/nfcapd/","title":"NFCAPD (NetFlow Capture Daemon)","text":""},{"location":"Networking/Misc/nfcapd/#show-running-captures","title":"Show running captures","text":"sudo ps -e -o command | grep nfcapd\n
"},{"location":"Networking/Misc/nfcapd/#edit-configuration","title":"Edit configuration","text":"Find the nfsen configuration first
sudo find / -type f -name \"nfsen.conf\"\n
vim /opt/etc/nfsen.conf\n
"},{"location":"Networking/Misc/nfcapd/#links","title":"Links","text":"The 95th percentile is a commonly used statistical measure to discard the top 5% of the highest values in a dataset. In network traffic calculation, it's a method used to determine the bandwidth billing rate, highlighting the regular usage while excluding rare usage spikes.
"},{"location":"Networking/Misc/traffic-billing/#how-it-works","title":"How it works","text":"def calculate_95th_percentile(data):\n data.sort() # Step 1: Sort the data\n index = 0.95 * len(data) # Step 2: Determine the 95th percentile index\n\n # Step 3: Get the value\n if index.is_integer():\n return data[int(index)-1] # Python indices are 0-based\n else:\n return data[int(round(index))-1]\n\n# Example data: Traffic measurements (in Mbps) every 5 minutes for a day (288 measurements for 24 hours)\ntraffic_data = [random.randint(50, 200) for _ in range(288)] # Random traffic data between 50 Mbps and 200 Mbps\n\npercentile_value = calculate_95th_percentile(traffic_data)\nprint(f\"95th Percentile Value: {percentile_value} Mbps\")\n
"},{"location":"Networking/NSX-T/NSX-T%20Links/","title":"NSX-T Links","text":"Network virtualization is the carving up of a single physical network into many virtual networks. Virtualizing a resource allows it to be shared by multiple users. Sharing allows the efficient use of a resource when no single user can utilize the entire resource.
Virtualization affords each user the illusion that they own the resource. In the case of virtual networks, each user is under the illusion that there are no other users of the network. To preserve the illusion, virtual networks are isolated from one another. Packets cannot accidentally leak from one virtual network to another.
"},{"location":"Networking/Virtualization/network-virtualization/#links","title":"Links:","text":"VXLAN (Virtual Extensible LAN) is a standard overlay protocol that abstracts logical virtual networks from the physical network underneath. With VXLAN simple and scalable layer 3 Clos architectures can be deployed, while extending layer 2 segments over that layer 3 network. VTEPs (VXLAN Tunnel Endpoints) are the tunnel edges.
VXLAN uses a VLAN-like encapsulation technique to encapsulate MAC-based layer 2 Ethernet frames within layer 3 UDP packets. Each virtual network is a VXLAN logical layer 2 segment. VXLAN scales to 16 million segments - a 24-bit VXLAN network identifier (VNI) in the VXLAN header - for multi-tenancy.
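The segment count follows directly from the identifier width: \(2^{24} = 16\,777\,216\) possible VNIs.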
In a large VXLAN deployment, two aspects need attention: 1. discovery of other endpoints (VTEPs) sharing the same VXLAN segments 2. avoidance of BUM frames (broadcast, unknown unicast and multicast) as they have to be forwarded to all VTEPs.
On Cumulus Linux each VXLAN is locally configured using a bridge for local virtual interfaces. The bridge is taking care of the local MAC addresses (notably, using source-address learning) and the VXLAN interface takes care of the remote MAC addresses (received with BGP EVPN).
"},{"location":"Networking/Virtualization/VXLAN/vxlan-general/#links","title":"Links:","text":"Extended IP access list TEST\n 2 permit ip host 10.10.10.1 host 10.10.10.2\n 3 permit ip host 10.10.10.3 host 10.10.10.4\n
"},{"location":"Networking/cisco/ios/acl/#command","title":"Command","text":"ip access-list resequence TEST 10 10\n
"},{"location":"Networking/cisco/ios/acl/#after","title":"After","text":"Extended IP access list TEST\n 10 permit ip host 10.10.10.1 host 10.10.10.2\n 20 permit ip host 10.10.10.3 host 10.10.10.4\n
"},{"location":"Networking/containerlab/cumulus/","title":"Cumulus on Containerlab","text":""},{"location":"Networking/containerlab/cumulus/#usage","title":"Usage","text":""},{"location":"Networking/containerlab/cumulus/#cvx","title":"CVX","text":"Container Image: docker.io/networkop/cx:5.1.0 Username: root Password: root
"},{"location":"Networking/containerlab/ssh/","title":"SSH into Containerlab devices","text":""},{"location":"Networking/containerlab/ssh/#ssh-config","title":"SSH Config","text":"$HOME/.ssh/config
host clab-*\n StrictHostKeyChecking no\n UserKnownHostsFile /dev/null\n
"},{"location":"Networking/vyos/install/","title":"Install vyos","text":""},{"location":"Networking/vyos/install/#download-live-image","title":"Download live image","text":"Nightly builds
"},{"location":"Networking/vyos/install/#install-permanent-on-disk","title":"Install permanent on disk","text":"vyos@vyos:~$ install image\n
"},{"location":"Operating-Systems/Android/apps/shelter/","title":"Shelter","text":"Shelter is a Free and Open-Source (FOSS) app that leverages the \"Work Profile\" feature of Android to provide an isolated space that you can install or clone apps into. https://github.com/PeterCxy/Shelter
"},{"location":"Operating-Systems/Android/apps/shelter/#links","title":"Links","text":"Install VcXsrv
Start XLaunch
with enabled clipboard and monitor 1
Set the Windows environment variable DISPLAY=\"127.0.0.1:1.0\"
Connect through SSH with the -Y
option.
Linux script to check working connection:
#!/usr/bin/env bash\n\nif ! timeout 3s xset q &>/dev/null; then\n echo \"No X server at \\$DISPLAY [$DISPLAY]\" >&2\n exit 1\nfi\n\necho \"Seems to work :)\"\n
"},{"location":"Operating-Systems/Linux/CLI/htpasswd/","title":"htpasswd","text":""},{"location":"Operating-Systems/Linux/CLI/htpasswd/#hash-bcrypt-with-input","title":"Hash BCrypt with input","text":"htpasswd -B -n username\n
"},{"location":"Operating-Systems/Linux/CLI/htpasswd/#run-with-a-container","title":"Run with a container","text":"docker run --rm -it httpd:latest htpasswd -B -n username\n
"},{"location":"Operating-Systems/Linux/CLI/iotop/","title":"iotop","text":""},{"location":"Operating-Systems/Linux/CLI/iotop/#watch-processes-accumulated","title":"Watch processes accumulated","text":"iotop -aoP\n
"},{"location":"Operating-Systems/Linux/CLI/nmap/","title":"NMAP","text":""},{"location":"Operating-Systems/Linux/CLI/nmap/#scan-network-for-host-with-open-ssh-port","title":"Scan network for host with open ssh port","text":""},{"location":"Operating-Systems/Linux/CLI/ssh/","title":"SSH","text":""},{"location":"Operating-Systems/Linux/CLI/ssh/#socks-proxy","title":"SOCKS Proxy","text":"ssh -D 1337 -C $USER@<target>\n
"},{"location":"Operating-Systems/Linux/CLI/tee/","title":"Tee","text":"With tee
it is possible to read from standard input and write to standard output and files (or commands) at the same time.
Log into file and stdout: foo | tee output.file
Append to a file: foo | tee -a output.file
Include stderr: foo 2>&1 | tee output.file
2>&1
redirects channel 2 (stderr/standard error) into channel 1 (stdout/standard output), such that both are written to stdout
Execute a program periodically, showing output in fullscreen.
"},{"location":"Operating-Systems/Linux/CLI/watch/#usage","title":"Usage","text":"watch du -sh file
Custom interval in seconds (defaults to every 2 seconds): watch -n 1 du -sh file
nmcli connection modify <name> connection.autoconnect yes\n
nmcli connection modify <name> 802-11-wireless-security.psk <psk>\n
nmcli connection up <name>\n
"},{"location":"Operating-Systems/Linux/Misc/Time/","title":"Time","text":""},{"location":"Operating-Systems/Linux/Misc/Time/#list-timezone","title":"List timezone","text":"timedatectl list-timezones
sudo timedatectl set-timezone Europe/Berlin
Install chronic
apt install moreutils\n
/etc/cron.d/01-example-cron
SHELL=/bin/bash\nPATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin\nMAILTO=root,my-mail@example.org\n\n0 0 * * * root chronic /usr/local/bin/backup\n
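chronic stays silent when the wrapped command exits 0 and only passes the output through on failure, so cron mails arrive only for broken runs. A quick sanity check:
chronic bash -c 'echo ok'              # no output, exit code 0
chronic bash -c 'echo broken; exit 1'  # prints "broken", exit code 1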
"},{"location":"Operating-Systems/Linux/Misc/cryptsetup/","title":"Cryptsetup","text":"# format the disk with the luks structure\ncryptsetup luksFormat /dev/sda4\n\n# open the encrypted partition and map it to /dev/mapper/cryptroot\ncryptsetup luksOpen /dev/sda4 cryptroot\n\n# format as usual\nmkfs.ext4 -L nixos /dev/mapper/cryptroot\n
"},{"location":"Operating-Systems/Linux/Misc/window-names/","title":"Show window names","text":"Run the following command, after that click on a window to see its name
xprop | grep \"NAME\"\n
"},{"location":"Operating-Systems/Linux/Misc/window-names/#example","title":"Example","text":"\u276f xprop | grep \"NAME\"\nWM_NAME(STRING) = \"Spotify\"\n_NET_WM_NAME(UTF8_STRING) = \"Spotify\"\n
"},{"location":"Operating-Systems/Linux/Networking/Bridge%20Interface/","title":"Create a bridge interface","text":""},{"location":"Operating-Systems/Linux/Networking/Bridge%20Interface/#with-iproute2","title":"With iproute2","text":"Create a new bridge ip link add name bridge_name type bridge
Set interface to state up ip link set bridge_name up
Add an interface to the bridge (state of the interface must be up) ip link set eth0 master bridge_name
Verify bridge bridge link
Remove interface from a bridge ip link set eth0 nomaster
Edit file /etc/systemd/network/mybridge.network
[Match]\nName=br0\n\n[Network]\nDHCP=ipv4\n
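If systemd-networkd should also create the bridge itself (instead of iproute2 above), a matching .netdev file is needed as well, e.g. /etc/systemd/network/mybridge.netdev:
[NetDev]
Name=br0
Kind=bridge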
Enable, start and reload systemd-networkd
sudo systemctl enable systemd-networkd\nsudo systemctl start systemd-networkd\nsudo systemctl reload systemd-networkd\n
"},{"location":"Operating-Systems/Linux/Networking/dns/","title":"DNS","text":""},{"location":"Operating-Systems/Linux/Networking/dns/#find-local-dns-resolver","title":"Find local DNS resolver","text":"sudo lsof -i :53 -S\n
"},{"location":"Operating-Systems/Linux/PulseAudio/Volume/","title":"PulseAudio Volume Stuff","text":""},{"location":"Operating-Systems/Linux/PulseAudio/Volume/#find-devices","title":"Find devices","text":"t=$(pacmd list-sinks && pacmd list-sinks && pacmd list-sources) && echo $t | grep \"name:\"
pacmd set-source-volume alsa_input.usb-Burr-Brown_from_TI_USB_Audio_CODEC-00.analog-stereo 0x25000
Install blueman
Launch the graphical settings with blueman-manager
put the following configuration into ~/.Xresources
Xft.dpi: 150\n
load settings
xrdb -merge ~/.Xresources\nexec i3\n
"},{"location":"Operating-Systems/Linux/Window-Manager/i3/i3-wallpaper/","title":"Wallpaper in i3","text":"feh can be used to display a wallpaper.
Define the following in the i3 config file to use a random wallpaper from the path ~/wallpaper/
.
exec --no-startup-id feh --bg-scale --random ~/wallpaper/\n
"},{"location":"Operating-Systems/Linux/Window-Manager/i3/spotify/","title":"Control Spotify in i3","text":"# spotify player controls\nbindsym XF86AudioPlay exec \"dbus-send --print-reply --dest=org.mpris.MediaPlayer2.spotify /org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player.PlayPause\"\nbindsym XF86AudioStop exec \"dbus-send --print-reply --dest=org.mpris.MediaPlayer2.spotify /org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player.Stop\"\nbindsym XF86AudioPrev exec \"dbus-send --print-reply --dest=org.mpris.MediaPlayer2.spotify /org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player.Previous\"\nbindsym XF86AudioNext exec \"dbus-send --print-reply --dest=org.mpris.MediaPlayer2.spotify /org/mpris/MediaPlayer2 org.mpris.MediaPlayer2.Player.Next\"\n
"},{"location":"Operating-Systems/Linux/Window-Manager/i3/volume/","title":"Volume Control in i3","text":"A graphical control for PulseAudio is pavucontrol
.
# Use pactl to adjust volume in PulseAudio.\nset $refresh_i3status killall -SIGUSR1 i3status\nbindsym XF86AudioRaiseVolume exec --no-startup-id pactl set-sink-volume @DEFAULT_SINK@ +5% && $refresh_i3status\nbindsym XF86AudioLowerVolume exec --no-startup-id pactl set-sink-volume @DEFAULT_SINK@ -5% && $refresh_i3status\nbindsym XF86AudioMute exec --no-startup-id pactl set-sink-mute @DEFAULT_SINK@ toggle && $refresh_i3status\nbindsym XF86AudioMicMute exec --no-startup-id pactl set-source-mute @DEFAULT_SOURCE@ toggle && $refresh_i3status\n
"},{"location":"Operating-Systems/Linux/nix/nixpkgs/","title":"nixpkgs","text":""},{"location":"Operating-Systems/Linux/nix/nixpkgs/#get-github-checksums","title":"Get GitHub checksums","text":"nix-prefetch-url --unpack https://github.com/catppuccin/bat/archive/f0dedf515c02799b76a2804db9815a479f6c0075.zip\n
REPO=\"\"\n
rm -rf /tmp/repo-check\ngit clone --depth 1 \"$REPO\" /tmp/repo-check\ngit -C /tmp/repo-check rev-parse HEAD\nrm -rf /tmp/repo-check/.git\nnix hash path /tmp/repo-check\n
fetchFromGitHub {\n owner = \"owner\";\n repo = \"repo\";\n rev = \"65bb66d364e0d10d00bd848a3d35e2755654655b\";\n hash = \"sha256-8EUDsWeTeZwJNrtjEsUNLMt9I9mjabPRBZG83u7xtPw=\";\n}\n
"},{"location":"Operating-Systems/Linux/nix/nixpkgs/#build","title":"Build","text":"nix-build -E 'with import <nixpkgs> {}; callPackage ./default.nix {}'\n
"},{"location":"Operating-Systems/Linux/nix/nixpkgs/#test-install","title":"Test-Install","text":"nix-env -iA <package> -f <path to repo>\n
"},{"location":"Operating-Systems/Linux/nix/nixpkgs/#submitting-changes","title":"Submitting Changes","text":"https://nixos.org/manual/nixpkgs/stable/#chap-submitting-changes
"},{"location":"Operating-Systems/Linux/nix/nixpkgs/#maintainer","title":"Maintainer","text":"Add yourself to the nixpkgs/maintainers/maintainer-list.nix
file.
Format
handle = {\n # Required\n name = \"Your name\";\n email = \"address@example.org\";\n # Optional\n matrix = \"@user:example.org\";\n github = \"GithubUsername\";\n githubId = your-github-id;\n keys = [{\n longkeyid = \"rsa2048/0x0123456789ABCDEF\";\n fingerprint = \"AAAA BBBB CCCC DDDD EEEE FFFF 0000 1111 2222 3333\";\n }];\n};\n
"},{"location":"Operating-Systems/Linux/security/selinux/","title":"SELinux","text":""},{"location":"Operating-Systems/Linux/security/selinux/#commands","title":"Commands","text":"See SELinux booleans
getsebool -a\n
Get messages since 14:05
journalctl -t setroubleshoot --since=14:05\n
"},{"location":"Operating-Systems/Linux/security/selinux/#inspection","title":"Inspection","text":"Inspect a AVC message
sealert -l [message_ID]\n
"},{"location":"Operating-Systems/Linux/security/selinux/#flags","title":"Flags","text":"chcon\nrestorecron\n
"},{"location":"Operating-Systems/Linux/security/firewall/firewalld/","title":"firewalld","text":""},{"location":"Operating-Systems/Linux/security/firewall/firewalld/#zones","title":"Zones","text":""},{"location":"Operating-Systems/Linux/security/firewall/firewalld/#list-zones","title":"List Zones","text":"firewall-cmd --get-active-zones\n
"},{"location":"Operating-Systems/Linux/security/firewall/firewalld/#rules","title":"Rules","text":""},{"location":"Operating-Systems/Linux/security/firewall/firewalld/#ports","title":"Ports","text":"firewall-cmd --permanent --zone=public --add-port=25565/tcp --add-port=19132/udp\n
Port Range
firewall-cmd --permanent --zone=public --add-port=40000-40030/udp\n
"},{"location":"Operating-Systems/Linux/security/firewall/firewalld/#remove-ports","title":"Remove Ports","text":"firewall-cmd --permanent --zone=public --remove-port=25565/tcp --remove-port=19132/udp\n
Port Range
firewall-cmd --permanent --zone=public --remove-port=40000-40030/udp\n
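Rules added with --permanent only become active after a reload:
firewall-cmd --reload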
"},{"location":"Operating-Systems/Linux/security/firewall/ufw/","title":"UFW","text":""},{"location":"Operating-Systems/Linux/security/firewall/ufw/#get-status","title":"Get status","text":"ufw status verbose\n
"},{"location":"Operating-Systems/Linux/security/firewall/ufw/#rules","title":"Rules","text":""},{"location":"Operating-Systems/Linux/security/firewall/ufw/#ports","title":"Ports","text":"ufw allow 22/tcp\n````\n\n```bash\nufw deny 80/tcp\n````\n\n### Remove Ports\n\n```bash\nufw delete allow 22/tcp\n
"},{"location":"Operating-Systems/Linux/security/firewall/ufw/#block-all-incoming-traffic","title":"Block all incoming traffic","text":"ufw default deny incoming\n
"},{"location":"Other/20%20rules%20of%20formulating%20knowledge/","title":"20 rules of formulating knowledge","text":"Effective learning: Twenty rules of formulating knowledge
"},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#1-do-not-learn-if-you-dont-understand","title":"1. Do not learn if you don't understand","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#2-learn-before-you-memorize","title":"2. Learn before you memorize","text":"You need to build an overall picture of the learned knowledge in order to memorize it. Do not start from memorizing loosely related facts!
"},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#3-build-upon-the-basics","title":"3. Build upon the basics","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#4-stick-to-the-minimum-information-principle","title":"4. Stick to the minimum information principle","text":"The material you learn must be formulated in as simple way at is is.
"},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#5-cloze-deletion-is-easy-and-effective","title":"5. Cloze deletion is easy and effective","text":"Cloze deletion is a sentence with its parts missing and replaced by three dots.
"},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#6-use-imagery","title":"6. Use Imagery","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#7-use-mnemonic-techniques","title":"7. Use mnemonic techniques","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#8-graphic-deletions-is-as-good-as-cloze-deletion","title":"8. Graphic deletions is as good as cloze deletion","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#9-avoid-sets","title":"9. Avoid sets","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#10-avoid-enumeration","title":"10. Avoid enumeration","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#11-combat-interference","title":"11. Combat interference","text":"When you learn about similar things you often confuse them. You need to make items as unambiguous as possible.
"},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#12-optimized-wording","title":"12. Optimized wording","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#13-refer-to-other-memories","title":"13. Refer to other memories","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#14-personalize-and-provide-examples","title":"14. Personalize and provide examples","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#15-rely-on-emotional-states","title":"15. Rely on emotional states","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#16-context-cues-simplify-wording","title":"16. Context cues simplify wording","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#17-redundancy-does-not-contradict-minimum-information-principle","title":"17. Redundancy does not contradict minimum information principle","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#18-provide-sources","title":"18. Provide sources","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#19-provide-data-stamping","title":"19. Provide data stamping","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#20-prioritize","title":"20 Prioritize","text":""},{"location":"Other/20%20rules%20of%20formulating%20knowledge/#summary","title":"Summary","text":"import json\nimport urllib.request as requests\n\nconfig = {\n \"token\": \"blabla\",\n \"duck_domain\": \"cloud-test\",\n \"ipv4\": True,\n \"ipv6\": True\n }\n\nipv4URL = 'https://ipv4.ipleak.net/json/'\nipv6URL = 'https://ipv6.ipleak.net/json/'\n\nif config[\"ipv4\"]:\n request = requests.urlopen(ipv4URL)\n data = json.load(request)\n print(f'IPv4: {json.dumps(data[\"ip\"], indent=2)}')\n\n request = requests.urlopen(f'https://www.duckdns.org/update?domains={config[\"duck_domain\"]}&token={config[\"token\"]}&ip={data[\"ip\"]}')\n if request.status != 200:\n print(request.msg)\n\n\nif config[\"ipv6\"]:\n request = requests.urlopen(ipv6URL)\n data = json.load(request)\n print(f'IPv6: {json.dumps(data[\"ip\"], indent=2)}')\n\n request = requests.urlopen(f'https://www.duckdns.org/update?domains={config[\"duck_domain\"]}&token={config[\"token\"]}&ipv6={data[\"ip\"]}')\n if request.status != 200:\n print(request.msg)\n
"},{"location":"Other/digital-gardening/","title":"Digital Gardening","text":""},{"location":"Other/digital-gardening/#links","title":"Links","text":"A stack is a data structure used to store a collection of objects. Individual items can be added and stored in a stack using a push operation. Objects can be retrieved using a pop operation, which removes an item from the stack.
When an object is added to a stack, it is placed on the top of all previously entered items.
A stack in which items are removed from the top is considered a \"LIFO\" - Last in, First Out. In a \"FIFO\" - First In, First Out items are removed from the bottom.
"},{"location":"Programming-Languages/Bash/","title":"Bash","text":""},{"location":"Programming-Languages/Bash/#notes","title":"Notes","text":"#!/usr/bin/env bash
.~/.bashrc
doesn't have to be a mess{\n \"my_key\": \"my_string\" \n}\n
"},{"location":"Programming-Languages/JSON/#integer-and-floats","title":"Integer and floats","text":"{\n \"my_int\": 2,\n \"my_float\": 3.5\n}\n
"},{"location":"Programming-Languages/JSON/#array","title":"Array","text":"{\n \"my_list\": [\"test\", 5, \"test3\"]\n}\n
"},{"location":"Programming-Languages/JSON/#objects","title":"Objects","text":"{\n \"my_object\": {\n \"name\": \"Test Object\",\n \"childs\": [\n {\n \"name\": \"Child object 1\"\n }\n ]\n }\n}\n
"},{"location":"Programming-Languages/JSON/#links","title":"Links","text":""},{"location":"Programming-Languages/YAML/","title":"YAML","text":"YAML is commonly used for configuration files and in applications where data is being stored or transmitted.
Filenames can end with .yaml
or .yml
.
---\nkey: this is a string\n\nkey: \"this is also a string\"\n\nkey: |\n this is a multi-line\n string with line breaks\n\nkey: >\n this a multi-line \n string withouth line breaks\n
"},{"location":"Programming-Languages/YAML/#integers-and-floats","title":"Integers and floats","text":"---\ninteger: 595\n\nfloat: 12.2\n
"},{"location":"Programming-Languages/YAML/#lists","title":"Lists","text":"---\nlist1: [1, \"two\", 3]\n\nlist2:\n - 1\n - \"two\"\n - 3\n
"},{"location":"Programming-Languages/YAML/#objects","title":"Objects","text":"---\nmy_obj:\n title: My Object\n description: This is a object\n childs:\n - test_obj:\n name: Test Object\n
"},{"location":"Programming-Languages/YAML/#comments","title":"Comments","text":"---\n# this is a comment\n
"},{"location":"Programming-Languages/YAML/#links","title":"Links","text":"GOOS=linux GOARCH=amd64 go build -v\n
"},{"location":"Programming-Languages/Go/Build/#windows","title":"Windows","text":"GOOS=windows GOARCH=amd64 go build -v\n
"},{"location":"Programming-Languages/Go/Build/#helpful-makefile","title":"Helpful Makefile","text":"PROJECT_NAME := \"test-project\"\nPKG := \"github.com/rwxd/$(PROJECT_NAME)\"\nPKG_LIST := $(shell go list ${PKG}/...)\nGO_FILES := $(shell find . -name '*.go' | grep -v _test.go)\n\nhelp:\n @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = \":.*?## \"}; {printf \"\\033[36m%-30s\\033[0m %s\\n\", $$1, $$2}'\n\nall: build\n\ntest: ## Test go code\n @go test -race ./...\n\ndep: ## Get the dependencies\n @go get -v -d ./...\n\nsetup: ## Install required things\n python3 -m pip install -r requirements-dev.txt\n pre-commit install\n\nbuild: dep build-linux build-windows ## Build for all platforms\n\nbuild-linux: dep ## Build for linux\n @mkdir -p build/\n @GOOS=linux GOARCH=amd64 go build -o build/ -v $(PKG)\n\nbuild-windows: dep ## Build for windows\n @mkdir -p build/\n @GOOS=windows GOARCH=amd64 go build -v -o build/ $(PKG)\n\nclean: ## Remove previous build\n @rm -rf build/\n
"},{"location":"Programming-Languages/Go/Concurrency/","title":"Concurrency in Go","text":""},{"location":"Programming-Languages/Go/Concurrency/#mutex","title":"Mutex","text":"Safely access data across multiple goroutines
func editFile(path string, mu *sync.Mutex){\n mu.Lock()\n defer mu.Unlock()\n // I/O stuff\n}\n
"},{"location":"Programming-Languages/Go/Formatting/","title":"Formatting in Go","text":"The Go development tools include a command, go fmt
, which automatically reformats your code to match the standard format.
import (\n \"runtime/pprof\"\n)\n\nfunc main {\n f, err := os.Create(\"my-tool.prof\")\n if err != nil {\n log.Fatal(err)\n }\n pprof.StartCPUProfile(f)\n defer pprof.StopCPUProfile()\n\n // CPU Intensive code\n}\n
"},{"location":"Programming-Languages/Go/profiling/#view-data","title":"View data","text":"go tool pprof my-tool.prof\n
# view top 10 functions\n(pprof) top\n\n# view top 20 functions\n(pprof) top20\n\n# view top 10 functions in a graph\n(pprof) top --cum\n\n# Visualize graph through web browser\n(pprof) web\n\n# Output graph as a svg\n(pprof) svg\n
"},{"location":"Programming-Languages/Go/profiling/#memory-profiling","title":"Memory Profiling","text":"Go comes with a built-in profiling tool called pprof that can provide detailed information about your application's runtime memory usage.
"},{"location":"Programming-Languages/Go/profiling/#generate-data_1","title":"Generate data","text":"import _ \"net/http/pprof\"\n
Then, add the following code to start a new HTTP server that will serve the pprof endpoints:
go func() {\n log.Println(http.ListenAndServe(\"localhost:6060\", nil))\n}()\n
With the above setup, you can access various profiling data by navigating to http://localhost:6060/debug/pprof/ while your application is running. For memory-related insights, http://localhost:6060/debug/pprof/heap is of particular interest.
"},{"location":"Programming-Languages/Go/profiling/#capture-heap-dump","title":"Capture Heap Dump","text":"Once you have pprof set up and your application is running:
Allow your application to run until you suspect a memory leak. Capture a heap profile by executing:
curl -s http://localhost:6060/debug/pprof/heap -o mem.pprof\n
"},{"location":"Programming-Languages/Go/profiling/#analyze-data","title":"Analyze data","text":"go tool pprof mem.pprof\n
python3 -m cProfile -o log.pstats -m my_module\n
"},{"location":"Programming-Languages/Python/Performance%20Analysis/#visualisation","title":"Visualisation","text":""},{"location":"Programming-Languages/Python/Performance%20Analysis/#gprof2dot-dot-diagram","title":"gprof2dot (Dot Diagram)","text":"sudo pacman -S graphviz\npip3 install gprof2dot\n
gprof2dot -f pstats log.pstats | dot -Tsvg -o log.svg\n
"},{"location":"Programming-Languages/Python/profiling/","title":"Profiling in Python","text":""},{"location":"Programming-Languages/Python/profiling/#generate-data","title":"Generate data","text":""},{"location":"Programming-Languages/Python/profiling/#pythons-integrated-cprofile","title":"Pythons integrated cProfile
","text":"python3 -m cProfile -o profile.pstats -m my_module <args>\n
"},{"location":"Programming-Languages/Python/profiling/#yappi","title":"Yappi","text":"Yappi supports asynchronous and multithreaded profiling, which is not supported by the built-in profiler.
pip3 install -U yappi\n
import yappi\nfrom my_module import my_function\n\nyappi.start()\n\nmy_function()\n\nyappi.stop()\n\nyappi.get_func_stats().save(\"profile.pstats\", type=\"pstats\")\n
"},{"location":"Programming-Languages/Python/profiling/#visualisation","title":"Visualisation","text":""},{"location":"Programming-Languages/Python/profiling/#gprof2dot-dot-diagram-svg","title":"gprof2dot (Dot Diagram, SVG)","text":"Transform a .pstats
file with gprof2dot into a dot graph as a svg file.
pip3 install -U gprof2dot\n
gprof2dot -f pstats profile.pstats | dot -Tsvg -o profile.svg\n
"},{"location":"Programming-Languages/Python/profiling/#snakeviz-interactive","title":"Snakeviz (Interactive)","text":"Snakeviz is a web-based profiling tool which allows users to analyse their code by filtering data by module, function and file, and sorting it according to different criteria such as the number of calls or cumulative time spent in a function.
pip3 install -U snakeviz\n
snakeviz profile.pstats\n
"},{"location":"Programming-Languages/Python/profiling/#flamegraph-svg","title":"flamegraph (SVG)","text":"Flame graphs are visual tools that show how much time is spent in each function call. The width of each bar in the graph represents the amount of time spent in that function, with wider bars indicating more time spent and narrower bars indicating less time. The main function is at the bottom, and the subfunctions are stacked vertically on top.
pip3 install -U flameprof\n
flameprof profile.pstats > profile.svg\n
"},{"location":"Programming-Languages/Python/typing-in-python/","title":"Typing in Python","text":"In Python typing can be optionally used. To check typing the standard tool is MyPy.
"},{"location":"Programming-Languages/Python/typing-in-python/#usage","title":"Usage","text":""},{"location":"Programming-Languages/Python/typing-in-python/#function-annotations","title":"Function annotations","text":"def func(arg: arg_type, optarg: arg_type = default) -> return_type: \n...\n
For arguments the syntax is argument: annotation
, while the return type is annotated using -> annotation
. Note that the annotation must be a valid Python expression.
Sometimes the type checker needs help in figuring out the types of variables as well. The syntax is similar:
pi: float = 3.142\n\ndef circumference(radius: float) -> float:\n return 2 * pi * radius`\n
"},{"location":"Programming-Languages/Python/typing-in-python/#links","title":"Links","text":"Create a virtual environment python3 -m virtualenv .venv
or python3 -m venv .venv
Get current path
from pathlib import Path\nSTATIC_FILES_DIR = Path(__file__).parent.absolute()\n
"},{"location":"Programming-Languages/Python/Python-Libraries/Pathlib/#links","title":"Links","text":""},{"location":"Programming-Languages/Python/Python-Libraries/PyTest/","title":"PyTest","text":""},{"location":"Programming-Languages/Python/Python-Libraries/PyTest/#mocks","title":"Mocks","text":"For mocking with PyTest the unittest.mock
library is used.
import json\nimport pytest\n\n@pytest.fixture\ndef json_loader():\n \"\"\"Loads data from JSON file\"\"\"\n\n def _loader(filename):\n with open(filename, 'r') as f:\n print(filename)\n data = json.load(f)\n return data\n\n return _loader\n\n\ndef test_wrong_stop(client, mocker, json_loader):\n # Arrange\n get_mock = mocker.MagicMock()\n get_mock.status_code = 200\n get_mock.json.return_value = json_loader(\n cta_error_incorrect_stop_response.json)\n mocker.patch.object(\n backend.cta.requests,\n 'get',\n return_value=get_mock,\n )\n\n # Act\n response = client.simulate_get('/stops/106')\n\n # Assert\n assert response.status == falcon.HTTP_200\n assert response.json == {'error': 'stop_id: 106 does not exist\n
"},{"location":"Programming-Languages/Python/Python-Libraries/PyTest/#links","title":"Links","text":"https://github.com/mkb79/audible-cli
"},{"location":"Tools/Audible-Cli/#setup","title":"Setup","text":""},{"location":"Tools/Audible-Cli/#authfile","title":"Authfile","text":"audible manage auth-file add --password \"<password>\"\n
"},{"location":"Tools/Audible-Cli/#profile","title":"Profile","text":"audible manage profile add\n
"},{"location":"Tools/Audible-Cli/#download-all-audio-books-to-the-current-directory","title":"Download all audio books to the current directory","text":"audible -P default -v info download --all --aax --ignore-podcasts --jobs 3 --ignore-errors\n
"},{"location":"Tools/Audible-Cli/#convert-aax-to-mp3","title":"Convert aax to mp3","text":"https://github.com/KrumpetPirate/AAXtoMP3
"},{"location":"Tools/Audible-Cli/#get-the-auth-token-from-audible-cli","title":"Get the auth token from audible-cli","text":"audible -P default activation-bytes\n
"},{"location":"Tools/Audible-Cli/#convert-aax-to-mp3_1","title":"Convert aax to mp3","text":"aaxtomp3 -e:mp3 --level 5 -s --authcode <authcode> --loglevel 1 <file.aax>\n
"},{"location":"Tools/Audible-Cli/#convert-all-aax-to-mp3","title":"Convert all aax to mp3","text":"find . -name \"*.aax\" -exec aaxtomp3 -e:mp3 --level 5 -s --authcode <authcode> --loglevel 1 --complete_dir <path> {} \\;\n
"},{"location":"Tools/Bitwarden/","title":"Bitwarden","text":"Bitwarden is a open source password manager with cloud synchronization and the option to host the sync server on your own hardware.
"},{"location":"Tools/Bitwarden/#links","title":"Links","text":"Use Jinja2 templating in boilerplates for new projects.
"},{"location":"Tools/Cookiecutter/#usage","title":"Usage","text":"python3 -m pip install cookiecutter\ncookiecutter gh:rwxd/cookiecutter-gh-project\n
"},{"location":"Tools/HashiCorp-Vault/","title":"HashiCorp Vault","text":"HashiCorp Vault can be used to store things like passwords, certificates and encryption keys.
"},{"location":"Tools/HashiCorp-Vault/#usage","title":"Usage","text":""},{"location":"Tools/HashiCorp-Vault/#cli","title":"CLI","text":"Login to a vault server with a token vault login -address=https://vault.net -method=token
List kv entries vault kv list network/services
Get a kv entry vault get network/services/ipam
Headscale is a self-hosted, open source implementation of the Tailscale control server.
"},{"location":"Tools/Headscale/#connect-a-client-to-the-server","title":"Connect a client to the server","text":""},{"location":"Tools/Headscale/#create-a-user","title":"Create a user","text":"In case you don't have a user yet, you can create one with the following command:
headscale users create <user>\n
"},{"location":"Tools/Headscale/#get-an-authkey-for-the-user","title":"Get an authkey for the user","text":"headscale --user <user> preauthkeys create --reusable --expiration 1h\n
"},{"location":"Tools/Headscale/#authenticate-tailscale-client","title":"Authenticate tailscale client","text":"tailscale up --login-server <headscale url> --authkey <authkey>\n
"},{"location":"Tools/Headscale/#check-status","title":"Check status","text":"tailscale status\n
"},{"location":"Tools/KeepassXC/","title":"KeePassXC","text":"KeePassXC is a open source password manager which uses a local password database file.
To sync the database with different devices an external cloud service like Dropbox or OneDrive is needed.
KeePassXC has the functionality to store SSH keys and inject the keys into the SSH agent.
"},{"location":"Tools/KeepassXC/#links","title":"Links","text":"MkDocs is a fast, simple and downright gorgeous static site generator that's geared towards building project documentation. Documentation source files are written in Markdown, and configured with a single YAML configuration file. -- https://www.mkdocs.org/
"},{"location":"Tools/MkDocs/#usage","title":"Usage","text":"View the website local with: mkdocs serve
.
Build the website with: mkdocs build
.
The default export directory is ./site
.
Task is a runner / built tool.
The configuration is written in a Taskfile.yml
Taskfile Template
# https://taskfile.dev \n\nversion: '3' \n\nvars: \nGREETING: Hello, World! \n\ntasks: \ndefault: \ncmds: \n- echo \"{{.GREETING}}\" \nsilent: true\n
"},{"location":"Tools/Task/#usage","title":"Usage","text":"Init a Taskfile template task --init
List tasks task -l
or task --list
Use vars at global or task level
vars:\n CONTAINER_NAME: wiki.rwxd.eu\n CURRENT_DIR:\n sh: pwd\n SITE_DIR: \"{{.CURRENT_DIR}}/site\"\n
"},{"location":"Tools/Task/#links","title":"Links","text":""},{"location":"Tools/autorestic/","title":"autorestic - High backup level CLI utility for restic.","text":"Documentation
The commands will work with the configuration saved to ~/.autorestic.yaml
you can also specify a different config file with the -c
flag.
The --ci
flag is used for the exec
command to prevent colors from being printed.
autorestic check\n
"},{"location":"Tools/autorestic/#backup","title":"Backup","text":"# all\nautorestic backup --all\n\n# specific locations\nautorestic backup --locations \"<location1>,<location2>\"\n
"},{"location":"Tools/autorestic/#show-stats-for-a-backend","title":"Show stats for a backend","text":"autorestic --ci exec -vb <backend> stats\n
"},{"location":"Tools/autorestic/#show-snapshots-for-a-backend","title":"Show snapshots for a backend","text":"autorestic --ci exec -vb <backend> snapshots\n
"},{"location":"Tools/autorestic/#check-a-backend-for-errors","title":"Check a backend for errors","text":"autorestic --ci exec -vb <backend> check\n
"},{"location":"Tools/autorestic/#mount-repository-on-backend","title":"Mount repository on backend","text":"mkdir -p /mnt/restic\nautorestic --ci exec -vb <backend> mount -- /mnt/restic\n
"},{"location":"Tools/borg/","title":"Borg","text":""},{"location":"Tools/borg/#delete-directory-from-all-backups","title":"Delete directory from all backups","text":"Dry-Run
borg recreate <archive> --dry-run --list --verbose --exclude <path>\n
Delete
borg recreate <archive> --list --verbose --exclude <path>\n
"},{"location":"Tools/cht.sh/","title":"cht.sh","text":""},{"location":"Tools/cht.sh/#links","title":"Links","text":"sequenceDiagram\nAlice->>John: Hello John, how are you?\nloop Healthcheck\n John->>John: Fight against hypochondria\nend\nNote right of John: Rational thoughts!\nJohn-->>Alice: Great!\nJohn->>Bob: How about you?\nBob-->>John: Jolly good!\n
"},{"location":"Tools/openssl/","title":"Open SSL","text":""},{"location":"Tools/openssl/#generate-passwords","title":"Generate passwords","text":"openssl passwd -6 -salt xyz
Arch yay -S spicetify-cli
generate config spicetify
apply config
spicetify backup apply\nspicetify apply\n
change theme spicetify config current_theme THEME_NAME
change color scheme spicetify config color_scheme SCHEME_NAME
when Spotify is installed through AUR
sudo chmod a+wr /opt/spotify\nsudo chmod a+wr /opt/spotify/Apps -R\n
"},{"location":"Tools/spicetify/#links","title":"Links","text":"With BFG large or troublesome files can be removed from a Git Repository
The Git repo should be cloned with --mirror
Delete a file in a Git repository and force push the new commit history.
bfg --delete-files file.md\ngit reflog expire --expire=now --all && git gc --prune=now --aggressive\ngit push --force\n
"},{"location":"Tools/Git/bfg-repo-cleaner/#secrest","title":"Secrest","text":"A file with a list of secrets can be used to remove all occurrences in the git repository
leaked-passwords.txt
PASSWORD1 # Replace literal string 'PASSWORD1' with '***REMOVED***' (default)\nPASSWORD2==>examplePass # replace with 'examplePass' instead\nPASSWORD3==> # replace with the empty string\nregex:password=\\w+==>password= # Replace, using a regex\nregex:\\r(\\n)==>$1 # Replace Windows newlines with Unix newlines\n
bfg --replace-text leaked-passwords.txt\n
git reflog expire --expire=now --all && git gc --prune=now --aggressive\n
"},{"location":"Tools/Git/git-crypt/","title":"Git Crypt","text":""},{"location":"Tools/Git/git-crypt/#how-to","title":"How to","text":""},{"location":"Tools/Git/git-crypt/#init","title":"Init","text":"Initialize repository with git-crypt init
Create a .gitattributes
file
touch .gitattributes\n
The .gitattatributes
file contains lines in the following form:
[file pattern] attr1=value1 attr2=value2\n
"},{"location":"Tools/Git/git-crypt/#example","title":"Example","text":"If we want to encrypt the file config.yml
, the .gitattatributes
should contain the following:
config.yml filter=git-crypt diff=git-crypt\n
With git-crypt status
we can see that our file will be encrypted on push to our remote repository.
\u276f git-crypt status | grep \"config.yml\"\n encrypted: config.yml\n
"},{"location":"Tools/Git/git-crypt/#locking","title":"Locking","text":"With git-crypt lock
and git-crypt unlock
the repository can be unlocked at will.
git-crypt add-gpg-user KEYID
docker run -v \"$PWD\":/path ghcr.io/zricethezav/gitleaks:v8.8.12 detect -f json -r \"/path/report-secrets.json\" --source=\"/path\"\n
Extract unique secrets to extracted-secrets
cat report-secrets.json | jq -n -r 'inputs[].Secret' | sort -u > extracted-secrets\n
"},{"location":"Tools/Git/gitleak/#clear-secrets-from-repository","title":"Clear secrets from repository","text":"Use (bfg)[../bfg-repo-cleaner.md]
Prepare with:
bfg --replace-text extracted-secrets\n
Clean secrets with:
git reflog expire --expire=now --all && git gc --prune=now --aggressive\n
"},{"location":"Tools/Git/GitLab/access-tokens/","title":"GitLab Access Tokens","text":""},{"location":"Tools/Git/GitLab/access-tokens/#clone","title":"Clone","text":"Clone with an access token git clone https://$project_name:$token@$gitlab/$project_path.git
Use in init.vim
let g:coc_global_extensions = [\n \\ 'coc-pyright',\n \\ 'coc-prettier',\n \\ 'coc-git',\n \\ 'coc-json',\n \\ 'coc-docker',\n \\ 'coc-yaml',\n \\ 'coc-html',\n \\ 'coc-sh',\n \\ 'coc-go',\n \\ '@yaegassy/coc-ansible',\n \\ ]\n
"},{"location":"Tools/PlantUML/PlantUML%20Themes/","title":"PlantUML Themes","text":"An overview of all available themes can be seen in the Theme Gallery.
"},{"location":"Tools/PlantUML/PlantUML%20Themes/#usage","title":"Usage","text":"Set a theme
Get all themes with help themes
g g \" + y G
%y+
echo \"deb http://download.proxmox.com/debian/pve bookworm pve-no-subscription\" > /etc/apt/sources.list.d/pve-community.list\n
"},{"location":"Virtualization/ProxMox/proxmox-8-apt-updates/#comment-out-the-enterprise-repository-at-etcaptsourceslistdpve-enterpriselist","title":"Comment out the enterprise repository at /etc/apt/sources.list.d/pve-enterprise.list","text":"sed -i 's/^deb/#deb/' /etc/apt/sources.list.d/pve-enterprise.list\n
"},{"location":"Virtualization/ProxMox/proxmox-8-apt-updates/#change-the-ceph-repository-at-etcaptsourceslistdcephlist","title":"Change the ceph repository at /etc/apt/sources.list.d/ceph.list","text":"sed -i 's/^deb/#deb/' /etc/apt/sources.list.d/ceph.list\necho \"deb http://download.proxmox.com/debian/ceph-quincy bookworm no-subscription\" >> /etc/apt/sources.list.d/ceph.list\n
"},{"location":"Virtualization/ProxMox/proxmox-cpu-consumption/","title":"Reduce CPU consumption on Proxmox","text":"cpufrequtils
via apt install cpufrequtils
apt update && apt install cpufrequtils\n
cpufreq-info -g
cpufreq-info -g\n
cpufreq-info -p\n
powersave
via cpufreq-set -g powersave
cpufreq-set -g powersave\n
echo 'GOVERNOR=\"powersave\"' | tee /etc/default/cpufrequtils\n
Powertop is a tool to diagnose issues with power consumption and power management. It can also be used to tune power management settings.
"},{"location":"Virtualization/ProxMox/proxmox-energy-consumption/#install-powertop","title":"Install powertop","text":"apt install powertop\n
"},{"location":"Virtualization/ProxMox/proxmox-energy-consumption/#run-powertop-calibration","title":"Run powertop calibration","text":"Calibration will toggle various functions on and off to determine the best settings for your system. So it is best to run this when the system is idle.
powertop --calibrate\n
"},{"location":"Virtualization/ProxMox/proxmox-energy-consumption/#run-powertop-to-see-recommendations","title":"Run powertop to see recommendations","text":"With you can switch between the different tabs.
powertop\n
"},{"location":"Virtualization/ProxMox/proxmox-energy-consumption/#auto-tune-power-management-settings-not-reboot-persistent","title":"Auto tune power management settings (not reboot persistent)","text":"powertop --auto-tune\n
"},{"location":"Virtualization/ProxMox/proxmox-energy-consumption/#systemd-service-to-auto-tune-power-management-settings-reboot-persistent","title":"Systemd service to auto tune power management settings (reboot persistent)","text":"cat << EOF > /etc/systemd/system/powertop.service\n[Unit]\nDescription=Powertop tunings\n\n[Service]\nType=oneshot\nRemainAfterExit=yes\nExecStart=/usr/sbin/powertop --auto-tune\n\n[Install]\nWantedBy=multi-user.target\nEOF\n\nsystemctl enable --now powertop.service\n
"},{"location":"Virtualization/ProxMox/proxmox-passtrough-hard-drive/","title":"Passtrough a hard drive from the Proxmox host to a VM","text":""},{"location":"Virtualization/ProxMox/proxmox-passtrough-hard-drive/#find-the-hard-drive-copy-the-uuid","title":"Find the hard drive & copy the UUID","text":"lsblk -o NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT,MODEL\n
"},{"location":"Virtualization/ProxMox/proxmox-passtrough-hard-drive/#find-the-vm-id","title":"Find the vm id","text":"qm list\n
"},{"location":"Virtualization/ProxMox/proxmox-passtrough-hard-drive/#passtrough-the-hard-drive-as-scsi","title":"Passtrough the hard drive as scsi","text":"qm set $vm_id -scsi2 /dev/disk/by-uuid/$disk_uuid\n
"},{"location":"Virtualization/ProxMox/proxmox-passtrough-hard-drive/#restart-the-vm","title":"Restart the vm","text":"qm reboot $vm_id\n
"},{"location":"Virtualization/ProxMox/proxmox-passtrough-hard-drive/#in-case-it-should-be-removed","title":"In case it should be removed","text":"qm unlink $vm_id --idlist scsi2\n
"}]}
\ No newline at end of file
diff --git a/sitemap.xml.gz b/sitemap.xml.gz
index b8cd9fd5..c372eb98 100644
Binary files a/sitemap.xml.gz and b/sitemap.xml.gz differ