Apply auto-formatting from trunk fmt

This commit is contained in:
awalsh128 2025-10-11 21:41:23 -07:00
parent 7c7a6933bd
commit fc79483542
40 changed files with 623 additions and 2734 deletions

View file

@ -1,9 +1,11 @@
---
name: Feature Request
about: Suggest an idea or enhancement for this action
title: "[FEATURE] "
title: "\[FEATURE] "
labels: enhancement
assignees: awalsh128
---
## Feature Summary
@ -12,7 +14,8 @@ A clear and concise description of what you want to happen.
## Problem Statement
What problem would this feature solve? Is your feature request related to a problem you're experiencing?
What problem would this feature solve? Is your feature request related to a
problem you're experiencing?
```txt
Example: I'm frustrated when [specific scenario] because [reason]
@ -28,7 +31,8 @@ Describe any alternative solutions or features you've considered.
## Use Case
Describe your specific use case and how this feature would benefit you and others.
Describe your specific use case and how this feature would benefit you and
others.
```yaml
# Example workflow showing how the feature would be used

View file

@ -1,15 +1,11 @@
name: Test Action
permissions:
contents: read
env:
DEBUG: false
on:
# Manual trigger (no inputs allowed per Trunk rule)
workflow_dispatch:
push:
branches: [dev-v2.0] # Test on pushes to dev branch
paths:
@ -24,7 +20,6 @@ on:
- internal/** # Only when action code changes
- action.yml
- .github/workflows/action_tests.yml
jobs:
list_all_versions:
runs-on: ubuntu-latest
@ -36,7 +31,6 @@ jobs:
with:
# Use the event ref/sha by default; do not accept user-controlled ref inputs
fetch-depth: 0
# Run the action from the checked out code
- name: Execute
id: execute
@ -46,9 +40,7 @@ jobs:
version: ${{ github.run_id }}-${{ github.run_attempt }}-list_all_versions
debug: false
- name: Verify
if: |
steps.execute.outputs.cache-hit != 'false' ||
steps.execute.outputs.all-package-version-list != 'fonts-liberation2=1:2.1.5-3,gir1.2-atk-1.0=2.52.0-1build1,gir1.2-freedesktop=1.80.1-1,gir1.2-gdkpixbuf-2.0=2.42.10+dfsg-3ubuntu3.2,gir1.2-gtk-3.0=3.24.41-4ubuntu1.3,gir1.2-harfbuzz-0.0=8.3.0-2build2,gir1.2-pango-1.0=1.52.1+ds-1build1,graphviz=2.42.2-9ubuntu0.1,libann0=1.1.2+doc-9build1,libblas3=3.12.0-3build1.1,libcdt5=2.42.2-9ubuntu0.1,libcgraph6=2.42.2-9ubuntu0.1,libgts-0.7-5t64=0.7.6+darcs121130-5.2build1,libgts-bin=0.7.6+darcs121130-5.2build1,libgvc6=2.42.2-9ubuntu0.1,libgvpr2=2.42.2-9ubuntu0.1,libharfbuzz-gobject0=8.3.0-2build2,liblab-gamut1=2.42.2-9ubuntu0.1,liblapack3=3.12.0-3build1.1,libpangoxft-1.0-0=1.52.1+ds-1build1,libpathplan4=2.42.2-9ubuntu0.1,python3-cairo=1.25.1-2build2,python3-gi-cairo=3.48.2-1,python3-numpy=1:1.26.4+ds-6ubuntu1,xdot=1.3-1'
if: "steps.execute.outputs.cache-hit != 'false' || \nsteps.execute.outputs.all-package-version-list != 'fonts-liberation2=1:2.1.5-3,gir1.2-atk-1.0=2.52.0-1build1,gir1.2-freedesktop=1.80.1-1,gir1.2-gdkpixbuf-2.0=2.42.10+dfsg-3ubuntu3.2,gir1.2-gtk-3.0=3.24.41-4ubuntu1.3,gir1.2-harfbuzz-0.0=8.3.0-2build2,gir1.2-pango-1.0=1.52.1+ds-1build1,graphviz=2.42.2-9ubuntu0.1,libann0=1.1.2+doc-9build1,libblas3=3.12.0-3build1.1,libcdt5=2.42.2-9ubuntu0.1,libcgraph6=2.42.2-9ubuntu0.1,libgts-0.7-5t64=0.7.6+darcs121130-5.2build1,libgts-bin=0.7.6+darcs121130-5.2build1,libgvc6=2.42.2-9ubuntu0.1,libgvpr2=2.42.2-9ubuntu0.1,libharfbuzz-gobject0=8.3.0-2build2,liblab-gamut1=2.42.2-9ubuntu0.1,liblapack3=3.12.0-3build1.1,libpangoxft-1.0-0=1.52.1+ds-1build1,libpathplan4=2.42.2-9ubuntu0.1,python3-cairo=1.25.1-2build2,python3-gi-cairo=3.48.2-1,python3-numpy=1:1.26.4+ds-6ubuntu1,xdot=1.3-1'\n"
run: |
echo "cache-hit = ${{ steps.execute.outputs.cache-hit }}"
echo "package-version-list = ${{ steps.execute.outputs.package-version-list }}"
@ -57,7 +49,6 @@ jobs:
diff <(echo "${{ steps.execute.outputs.all-package-version-list }}" ) <(echo "fonts-liberation2=1:2.1.5-3,gir1.2-atk-1.0=2.52.0-1build1,gir1.2-freedesktop=1.80.1-1,gir1.2-gdkpixbuf-2.0=2.42.10+dfsg-3ubuntu3.2,gir1.2-gtk-3.0=3.24.41-4ubuntu1.3,gir1.2-harfbuzz-0.0=8.3.0-2build2,gir1.2-pango-1.0=1.52.1+ds-1build1,graphviz=2.42.2-9ubuntu0.1,libann0=1.1.2+doc-9build1,libblas3=3.12.0-3build1.1,libcdt5=2.42.2-9ubuntu0.1,libcgraph6=2.42.2-9ubuntu0.1,libgts-0.7-5t64=0.7.6+darcs121130-5.2build1,libgts-bin=0.7.6+darcs121130-5.2build1,libgvc6=2.42.2-9ubuntu0.1,libgvpr2=2.42.2-9ubuntu0.1,libharfbuzz-gobject0=8.3.0-2build2,liblab-gamut1=2.42.2-9ubuntu0.1,liblapack3=3.12.0-3build1.1,libpangoxft-1.0-0=1.52.1+ds-1build1,libpathplan4=2.42.2-9ubuntu0.1,python3-cairo=1.25.1-2build2,python3-gi-cairo=3.48.2-1,python3-numpy=1:1.26.4+ds-6ubuntu1,xdot=1.3-1")
exit 1
shell: bash
list_versions:
runs-on: ubuntu-latest
name: List package versions.
@ -72,14 +63,8 @@ jobs:
debug: false
- name: Verify
if: steps.execute.outputs.cache-hit != 'false' || steps.execute.outputs.package-version-list != 'rolldice=1.16-1build3,xdot=1.3-1'
run: |
echo "cache-hit = ${{ steps.execute.outputs.cache-hit }}"
echo "package-version-list = ${{ steps.execute.outputs.package-version-list }}"
echo "diff package-version-list"
diff <(echo "${{ steps.execute.outputs.package-version-list }}" ) <(echo "rolldice=1.16-1build3,xdot=1.3-1")
exit 1
run: "echo \"cache-hit = ${{ steps.execute.outputs.cache-hit }}\" \necho \"package-version-list = ${{ steps.execute.outputs.package-version-list }}\"\necho \"diff package-version-list\"\ndiff <(echo \"${{ steps.execute.outputs.package-version-list }}\" ) <(echo \"rolldice=1.16-1build3,xdot=1.3-1\")\nexit 1\n"
shell: bash
standard_workflow_install:
runs-on: ubuntu-latest
name: Standard workflow install package and cache.
@ -98,7 +83,6 @@ jobs:
echo "cache-hit = ${{ steps.execute.outputs.cache-hit }}"
exit 1
shell: bash
standard_workflow_install_with_new_version:
needs: standard_workflow_install
runs-on: ubuntu-latest
@ -118,7 +102,6 @@ jobs:
echo "cache-hit = ${{ steps.execute.outputs.cache-hit }}"
exit 1
shell: bash
standard_workflow_restore:
needs: standard_workflow_install
runs-on: ubuntu-latest
@ -138,7 +121,6 @@ jobs:
echo "cache-hit = ${{ steps.execute.outputs.cache-hit }}"
exit 1
shell: bash
standard_workflow_restore_with_packages_out_of_order:
needs: standard_workflow_install
runs-on: ubuntu-latest
@ -158,7 +140,6 @@ jobs:
echo "cache-hit = ${{ steps.execute.outputs.cache-hit }}"
exit 1
shell: bash
standard_workflow_add_package:
needs: standard_workflow_install
runs-on: ubuntu-latest
@ -178,7 +159,6 @@ jobs:
echo "cache-hit = ${{ steps.execute.outputs.cache-hit }}"
exit 1
shell: bash
standard_workflow_restore_add_package:
needs: standard_workflow_add_package
runs-on: ubuntu-latest
@ -198,7 +178,6 @@ jobs:
echo "cache-hit = ${{ steps.execute.outputs.cache-hit }}"
exit 1
shell: bash
no_packages:
runs-on: ubuntu-latest
name: No packages passed.
@ -213,7 +192,6 @@ jobs:
if: steps.execute.outcome == 'failure'
run: exit 0
shell: bash
package_not_found:
runs-on: ubuntu-latest
name: Package not found.
@ -228,7 +206,6 @@ jobs:
if: steps.execute.outcome == 'failure'
run: exit 0
shell: bash
version_contains_spaces:
runs-on: ubuntu-latest
name: Version contains spaces.
@ -245,7 +222,6 @@ jobs:
if: steps.execute.outcome == 'failure'
run: exit 0
shell: bash
regression_36:
runs-on: ubuntu-latest
name: "Reinstall existing package (regression issue #36)."
@ -256,7 +232,6 @@ jobs:
packages: libgtk-3-dev
version: ${{ github.run_id }}-${{ github.run_attempt }}-regression_36
debug: ${{ env.DEBUG }}
regression_37:
runs-on: ubuntu-latest
name: "Install with reported package dependencies not installed (regression issue #37)."
@ -267,7 +242,6 @@ jobs:
packages: libosmesa6-dev libgl1-mesa-dev python3-tk pandoc git-restore-mtime
version: ${{ github.run_id }}-${{ github.run_attempt }}-regression_37
debug: ${{ env.DEBUG }}
debug_disabled:
runs-on: ubuntu-latest
name: Debug disabled.
@ -278,7 +252,6 @@ jobs:
packages: xdot
version: ${{ github.run_id }}-${{ github.run_attempt }}-list-all-package-versions
debug: false
regression_72_1:
runs-on: ubuntu-latest
name: "Cache Java CA certs package v1 (regression issue #72)."
@ -289,7 +262,6 @@ jobs:
packages: openjdk-11-jre
version: ${{ github.run_id }}-${{ github.run_attempt }}-regression_72
debug: ${{ env.DEBUG }}
regression_72_2:
runs-on: ubuntu-latest
name: "Cache Java CA certs package v2 (regression issue #72)."
@ -300,7 +272,6 @@ jobs:
packages: default-jre
version: ${{ github.run_id }}-${{ github.run_attempt }}-regression_72
debug: ${{ env.DEBUG }}
regression_76:
runs-on: ubuntu-latest
name: "Cache empty archive (regression issue #76)."
@ -318,7 +289,6 @@ jobs:
packages: intel-oneapi-runtime-libs
version: ${{ github.run_id }}-${{ github.run_attempt }}-regression_76
debug: ${{ env.DEBUG }}
regression_79:
runs-on: ubuntu-latest
name: "Tar error with libboost-dev (regression issue #79)."
@ -329,7 +299,6 @@ jobs:
packages: libboost-dev
version: ${{ github.run_id }}-${{ github.run_attempt }}-regression_79
debug: ${{ env.DEBUG }}
regression_81:
runs-on: ubuntu-latest
name: "Tar error with alsa-ucm-conf (regression issue #81)."
@ -340,7 +309,6 @@ jobs:
packages: libasound2 libatk-bridge2.0-0 libatk1.0-0 libatspi2.0-0 libcups2 libdrm2 libgbm1 libnspr4 libnss3 libxcomposite1 libxdamage1 libxfixes3 libxkbcommon0 libxrandr2
version: ${{ github.run_id }}-${{ github.run_attempt }}-regression_81
debug: ${{ env.DEBUG }}
regression_84_literal_block_install:
runs-on: ubuntu-latest
name: "Install multiline package listing using literal block style (regression issue #84)."
@ -349,11 +317,10 @@ jobs:
- uses: ./
with:
packages: >
xdot
rolldice distro-info-data
xdot rolldice distro-info-data
version: ${{ github.run_id }}-${{ github.run_attempt }}-regression_84_literal_block
debug: ${{ env.DEBUG }}
regression_84_literal_block_restore:
needs: regression_84_literal_block_install
runs-on: ubuntu-latest
@ -373,7 +340,6 @@ jobs:
echo "cache-hit = ${{ steps.execute.outputs.cache-hit }}"
exit 1
shell: bash
regression_84_folded_block_install:
runs-on: ubuntu-latest
name: "Install multiline package listing using literal block style (regression issue #84)."
@ -386,7 +352,6 @@ jobs:
rolldice distro-info-data
version: ${{ github.run_id }}-${{ github.run_attempt }}-regression_84_folded_block
debug: ${{ env.DEBUG }}
regression_84_folded_block_restore:
needs: regression_84_folded_block_install
runs-on: ubuntu-latest
@ -406,7 +371,6 @@ jobs:
echo "cache-hit = ${{ steps.execute.outputs.cache-hit }}"
exit 1
shell: bash
regression_89:
runs-on: ubuntu-latest
name: "Upload logs artifact name (regression issue #89)."
@ -417,7 +381,6 @@ jobs:
packages: libgtk-3-dev:amd64
version: ${{ github.run_id }}-${{ github.run_attempt }}-regression_89
debug: ${{ env.DEBUG }}
regression_98:
runs-on: ubuntu-latest
name: "Install error due to SHELLOPTS override (regression issue #98)."
@ -428,7 +391,6 @@ jobs:
packages: git-restore-mtime libgl1-mesa-dev libosmesa6-dev pandoc
version: ${{ github.run_id }}-${{ github.run_attempt }}-regression_98
debug: ${{ env.DEBUG }}
regression_106_install:
runs-on: ubuntu-latest
name: "Stale apt repo not finding package on restore, install phase (regression issue #106)."
@ -439,7 +401,6 @@ jobs:
packages: libtk8.6
version: ${{ github.run_id }}-${{ github.run_attempt }}-regression_106
debug: ${{ env.DEBUG }}
regression_106_restore:
needs: regression_106_install
runs-on: ubuntu-latest
@ -451,7 +412,6 @@ jobs:
packages: libtk8.6
version: ${{ github.run_id }}-${{ github.run_attempt }}-regression_106
debug: ${{ env.DEBUG }}
regression_159_install:
runs-on: ubuntu-latest
name: apt-show false positive parsing Package line (regression issue #159).
@ -462,7 +422,6 @@ jobs:
packages: texlive-latex-extra
version: ${{ github.run_id }}-${{ github.run_attempt }}-regression_159
debug: ${{ env.DEBUG }}
multi_arch_cache_key:
runs-on: ubuntu-latest
name: Cache packages with multi-arch cache key.
@ -473,7 +432,6 @@ jobs:
packages: libfuse2
version: ${{ github.run_id }}-${{ github.run_attempt }}-multi_arch_cache_key
debug: ${{ env.DEBUG }}
virtual_package:
runs-on: ubuntu-latest
name: Cache virtual package.

View file

@ -1,8 +1,6 @@
name: CI
permissions:
contents: read
on:
push:
branches: [dev-v2.0]
@ -12,88 +10,74 @@ on:
schedule:
- cron: 0 0 * * * # Run at 00:00 UTC every day
workflow_dispatch:
env:
DEBUG: false
SHELLOPTS: errexit:pipefail
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Read Go version from .env
id: go-version
run: |
echo "go_version=$(grep '^GO_VERSION=' .env | cut -d '=' -f2)" >> $GITHUB_OUTPUT
- uses: actions/setup-go@v5
with:
go-version: ${{ steps.go-version.outputs.go_version }}
cache: true
- name: Install Go module dependencies
run: go mod download
- name: Build
run: go build -v ./...
- name: trunk.io Lint
uses: trunk-io/trunk-action@v1
with:
arguments: check
- name: Test with coverage
run: go test -v -race -coverprofile=coverage.txt -covermode=atomic ./...
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v4
with:
files: ./coverage.txt
fail_ci_if_error: true
validate-scripts:
runs-on: ubuntu-latest
needs: build
steps:
- uses: actions/checkout@v4
- name: Check script syntax
run: |
for script in scripts/*.sh; do
for script in scripts/dev/*.sh; do
echo "Checking syntax for $script"
bash -n "$script"
done
- name: Check scripts are executable
run: |
for script in scripts/*.sh; do
for script in scripts/dev/*.sh; do
if [[ ! -x "$script" ]]; then
echo "::error::Script $script is not executable. Run 'chmod +x $script' and commit the changes."
exit 1
fi
done
- name: Check menu integration
run: |
echo "Checking if all scripts are integrated in menu.sh..."
for script in scripts/*.sh; do
for script in scripts/dev/*.sh; do
# Skip menu.sh itself
if [[ "$(basename "$script")" == "menu.sh" ]]; then
continue
fi
# Look for the script path in menu.sh
if ! grep -q "\".*$(basename "$script")\"" scripts/menu.sh; then
if ! grep -q "\".*$(basename "$script")\"" scripts/dev/menu.sh; then
echo "::error::Script $(basename "$script") is not integrated in menu.sh"
exit 1
fi
done
- name: Run script tests
run: |
if [[ -d "scripts/tests" ]]; then
for test in scripts/tests/*_test.sh; do
if [[ -d "scripts/dev/tests" ]]; then
for test in scripts/dev/tests/*_test.sh; do
if [[ -f "$test" ]]; then
echo "Running test: $test"
bash "$test"

View file

@ -1,45 +0,0 @@
on:
workflow_dispatch:
permissions:
contents: read
pull-requests: write
jobs:
# TODO finish debugging check_and_fix_env.sh to start using this
modify-pr:
runs-on: ubuntu-latest
name: Check and fix PR
env:
REQUIRES_FIX: ""
SAFE_HEAD_REF: ${{ github.head_ref }}
steps:
- name: Checkout PR branch
uses: actions/checkout@v4
with:
ref: ${{ env.SAFE_HEAD_REF }}
- name: Check and fix if needed
shell: bash
run: |
set -euo pipefail
./scripts/check_and_fix_env.sh
status=$?
echo "REQUIRES_FIX=$status" >> $GITHUB_ENV
if [[ "$status" != "0" ]]; then
echo "$status changes were made, applying fix."
fi
- name: Commit and push changes
shell: bash
if: env.REQUIRES_FIX != '0'
run: |
set -euo pipefail
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
git add .
git commit -m "Automated update from \"Check and fix PR\" workflow" || echo "No changes to commit"
git push origin HEAD:"${SAFE_HEAD_REF}"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SAFE_HEAD_REF: ${{ env.SAFE_HEAD_REF }}

View file

@ -1,21 +0,0 @@
name: Release Version
permissions:
contents: read
on:
push:
tags: [v2.*] # Trigger on version tags >= 2.0.0
# manual dispatch removed to comply with Trunk rule (use push tags instead)
jobs:
update-pkg-go-dev:
runs-on: ubuntu-latest
steps:
- name: Update pkg.go.dev
run: |
VERSION=${{ github.ref_name || github.sha }}
echo "Updating pkg.go.dev for version $VERSION"
curl -i https://proxy.golang.org/github.com/awalsh128/cache-apt-pkgs-action/@v/$VERSION.info
# Trigger a package load
GOPROXY=https://proxy.golang.org GO111MODULE=on go get github.com/awalsh128/cache-apt-pkgs-action@$VERSION

7
.gitignore vendored
View file

@ -5,7 +5,10 @@
# Don't ignore the main .env file
!.env
# Local Go binaries from build command
cache_apt_pkgs*
# Local VS Code environment (contains secrets)
.vscode/.env
# Distribute artifacts (built by CI)
distribute/
scripts/sandbox.sh

View file

@ -32,7 +32,7 @@ MD033:
MD034: true
# MD035 hr-style - Horizontal rule style
MD035:
style: ---
style: "---"
# MD041 first-line-heading - First line should be a top-level header
MD041:
level: 1

View file

@ -1,7 +1,9 @@
rules:
quoted-strings:
required: only-when-needed
extra-allowed: ["{|}"]
quoted-strings: disable
key-duplicates: {}
octal-values:
forbid-implicit-octal: true
empty-lines:
max: 2
max-start: 0
max-end: 0

View file

@ -1,24 +1,17 @@
# Copyright 2021 Praetorian Security, Inc.
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# GoKart analyzers configuration
# Uncomment analyzers section below to create a new vulnerability type
# analyzers:
# # Each entry specifies a vulnerability type.
# # Name of the vulnerability:
# Test Sink:
# # Description of this vulnerability
@ -31,7 +24,6 @@
# log:
# # Function name
# - Printf
# Each entry specifies a source that should be considered untrusted
# If the package already exists in the sources section, add the variable/function/type underneath
# Each package can contain multiple vulnerable sources.

View file

@ -7,7 +7,7 @@ cli:
plugins:
sources:
- id: trunk
ref: v1.7.2
ref: v1.7.3
uri: https://github.com/trunk-io/plugins
# Many linters and tools depend on runtimes - configure them here. (https://docs.trunk.io/runtimes)
runtimes:
@ -18,51 +18,47 @@ runtimes:
# This is the section where you manage your linters. (https://docs.trunk.io/check/configuration)
lint:
disabled:
- cspell
- codespell
- deno
- remark-lint
- markdown-table-prettify
- biome
enabled:
- yamlfmt@0.17.2
- vale@3.12.0
- trunk-toolbox@0.5.4
- trufflehog-git@3.90.6
- snyk@1.1295.0
- remark-lint@12.0.1
- pre-commit-hooks@6.0.0
- nancy@1.0.51
- markdownlint-cli2@0.18.1
- markdown-table-prettify@3.7.0
- markdown-link-check@3.13.7
- ls-lint@2.3.1
- golangci-lint@1.64.8
- gofmt@1.20.4
- gokart@0.5.1
- goimports@0.9.1
- gofumpt@0.5.0
- gitleaks@8.28.0
- deno@2.5.0
- biome@2.2.4
- kube-linter@0.7.2
- golines@0.13.0
- semgrep@1.136.0
- shellcheck@0.11.0
- golangci-lint2@2.5.0
- markdownlint@0.45.0
- osv-scanner@2.2.3
- actionlint@1.7.7
- checkov@3.2.470
- checkov@3.2.477
- cspell
- dotenv-linter@3.3.0
- git-diff-check
- gofmt@1.20.4
- golangci-lint2@2.4.0
- isort@6.0.1
- markdownlint@0.45.0
- osv-scanner@2.2.2
- gitleaks@8.28.0
- golangci-lint@1.64.8
- isort@6.1.0
- kube-linter@0.7.2
- ls-lint@2.3.1
- markdown-link-check@3.13.7
- markdownlint-cli2@0.18.1
- oxipng@9.1.5
- pre-commit-hooks@6.0.0
- prettier@3.6.2
- semgrep@1.139.0
- shellcheck@0.11.0
- shfmt@3.6.0
- trivy@0.66.0
- trufflehog@3.90.6
- snyk@1.1295.0
- trivy@0.67.1
- trunk-toolbox@0.5.4
- trufflehog@3.90.8
- vale@3.12.0
- yamlfmt@0.17.2
- yamllint@1.37.1
ignore:
- linters: [markdownlint]
- linters: [markdownlint-cli2]
paths: [".github/ISSUE_TEMPLATE/**"]
actions:
disabled:
- trunk-fmt-pre-commit
enabled:
- trunk-announce
- trunk-check-pre-push

View file

@ -1,6 +1,6 @@
{
"goVersion": "1.24",
"toolchainVersion": "",
"syspkgVersion": "v0.1.5",
"exportDate": "2025-09-07 11:53:25"
"goVersion": "1.24",
"toolchainVersion": "",
"syspkgVersion": "v0.1.5",
"exportDate": "2025-09-07 11:53:25"
}

View file

@ -2,9 +2,7 @@
"editor.tabSize": 2,
"editor.insertSpaces": true,
"editor.detectIndentation": false,
"editor.rulers": [
100
],
"editor.rulers": [100],
"editor.formatOnSave": true,
"editor.formatOnPaste": true,
"editor.formatOnType": true,
@ -12,7 +10,7 @@
"editor.wordWrapColumn": 100,
"editor.wrappingIndent": "indent",
"[go]": {
"editor.defaultFormatter": "trunk.io"
"editor.defaultFormatter": "golang.go"
},
"[shellscript]": {
"editor.defaultFormatter": "trunk.io"
@ -40,4 +38,4 @@
"files.readonlyInclude": {},
"workbench.editor.defaultBinaryEditor": "default",
"workbench.editor.enablePreviewFromCodeNavigation": false
}
}

516
CLAUDE.md
View file

@ -3,8 +3,6 @@
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**
- [General Code Organization Principles](#general-code-organization-principles)
- [1. Package Structure](#1-package-structure)
- [2. Code Style and Formatting](#2-code-style-and-formatting)
@ -40,13 +38,10 @@
- [Quoting Guidelines](#quoting-guidelines)
- [Formatting Standards](#formatting-standards)
- [Bash Scripts](#bash-scripts)
- [File and Directory Structure](#file-and-directory-structure)
- [Style and Format Rules](#style-and-format-rules)
- [Comments](#comments)
- [Script Testing](#script-testing)
- [YAML Files](#yaml-files)
- [Quoting Guidelines](#quoting-guidelines-1)
- [Examples](#examples)
- [Formatting Guidelines](#formatting-guidelines)
- [Multi-line Strings](#multi-line-strings)
- [GitHub Actions Specific](#github-actions-specific)
- [Testing Principles](#testing-principles)
- [1. Test Organization Strategy](#1-test-organization-strategy)
- [2. Code Structure](#2-code-structure)
@ -94,12 +89,11 @@
- Make zero values useful
- Keep interfaces small and focused, observing the
[single responsibility principle](https://en.wikipedia.org/wiki/Single-responsibility_principle)
- Observe the
[open-closed principle](https://en.wikipedia.org/wiki/Open%E2%80%93closed_principle)
- Observe the [open-closed principle](https://en.wikipedia.org/wiki/Open%E2%80%93closed_principle)
so that it is open for extension but closed to modification
- Observe the
[dependency inversion principle](https://en.wikipedia.org/wiki/Dependency_inversion_principle)
to keep interfaces loosely coupled
[dependency inversion principle](https://en.wikipedia.org/wiki/Dependency_inversion_principle) to
keep interfaces loosely coupled
- Design for composition over inheritance
- Use option patterns for complex configurations
- Make dependencies explicit
@ -111,10 +105,8 @@
Following the official [Go Documentation Guidelines](https://go.dev/blog/godoc):
1. **Package Documentation**
- Every package must have a doc comment immediately before the `package`
statement
- Format: `// Package xyz ...` (first sentence) followed by detailed
description
- Every package must have a doc comment immediately before the `package` statement
- Format: `// Package xyz ...` (first sentence) followed by detailed description
- First sentence should be a summary beginning with `Package xyz`
- Follow with a blank line and detailed documentation
- Include package-level examples if helpful
@ -462,27 +454,25 @@ go tool pprof -http=:8080 cpu.prof
- Minimize the amount of shell code and put complex logic in the Go code
- Use clear step `id` names that use dashes between words and active verbs
- Avoid hard-coded API URLs like <https://api.github.com>. Use environment
variables (GITHUB_API_URL for REST API, GITHUB_GRAPHQL_URL for GraphQL) or the
@actions/github toolkit for dynamic URL handling
- Avoid hard-coded API URLs like <https://api.github.com>. Use environment variables (GITHUB_API_URL
for REST API, GITHUB_GRAPHQL_URL for GraphQL) or the @actions/github toolkit for dynamic URL
handling
##### Release Management
- Use semantic versioning for releases (e.g., v1.0.0)
- Recommend users reference major version tags (v1) instead of the default
branch for stability.
- Recommend users reference major version tags (v1) instead of the default branch for stability.
- Update major version tags to point to the latest release
##### Create a README File
Include a detailed description, required/optional inputs and outputs, secrets,
environment variables, and usage examples
Include a detailed description, required/optional inputs and outputs, secrets, environment
variables, and usage examples
##### Testing and Automation
- Add workflows to test your action on feature branches and pull requests
- Automate releases using workflows triggered by publishing or editing a
release.
- Automate releases using workflows triggered by publishing or editing a release.
##### Community Engagement
@ -601,78 +591,127 @@ jobs:
Project scripts should follow these guidelines:
- Follow formatting rules in
[ShellCheck](https://github.com/koalaman/shellcheck/wiki)
- Follow style guide rules in
[Google Bash Style Guide](https://google.github.io/styleguide/shellguide)
- Include proper error handling and exit codes
- Use `scripts/lib.sh` whenever for common functionality
#### File and Directory Structure
All scripts must go into the project's script directory with specific guidance below
```text
scripts
├── dev (directory for local development)
│ ├── *.sh (scripts used for development)
│ ├── lib.sh (library to use for common development functionality)
│ └── tests
│ ├── *.sh (corresponding tests for all scripts in the parent)
│ ├── template_test.sh (example template for all tests)
│ └── test_lib.sh (library to use for common testing functionality)
├── *.sh (files used for integration and deployment operations)
├── lib.sh (general library for common functionality applying to all script types)
├── lib_test.sh (testing framework that all script tests must use)
├── template_test.sh (example template for all tests)
└── template.sh (example template for all development scripts)
```
- `scripts` is where all scripts go, either at root or in sub-folders
- `scripts/*.sh` is where non-development functionality like integration and deployment
functionality goes
- `dev` is where local development functionality only
- `dev/lib.sh` is where common functionality for local development scripts go
- `dev/tests.sh` is for local development script tests
- `lib_test.sh` is the testing framework that all scripts must use
- `lib.sh` is the common functionality applying to both root scripts and `dev` scripts
- `template_test.sh` baseline template that all script tests must be created from
- `template.sh` baseline template that all scripts must be created from
- Make scripts executable (`chmod +x`)
- Add new development script functionality to the `scripts/dev/menu.sh` script for easy access
- Always use `template.sh` when creating a script
#### Style and Format Rules
- **MANDATORY:** All Bash scripts must strictly follow the [Google Bash Style Guide](https://google.github.io/styleguide/shellguide) for naming, formatting, comments, and best practices.
- **MANDATORY:** All Bash scripts must pass [ShellCheck](https://github.com/koalaman/shellcheck/wiki) with no warnings or errors.
- **MANDATORY:** All script comments and header blocks must wrap at a maximum line length of 80 characters.
- Use the `function` keyword before all function definitions: `function my_function() {`
- Use imperative verb form for script names:
- Good: `export_version.sh`, `build_package.sh`, `run_tests.sh`
- Bad: `version_export.sh`, `package_builder.sh`, `test_runner.sh`
- Create scripts in the `scripts` directory (not `tools`)
- Make scripts executable (`chmod +x`)
- Add new functionality to the `scripts/menu.sh` script for easy access
- Add usage information (viewable with `-h` or `--help`)
Script Header Format:
Scripts that do not comply with these external standards will be flagged in code review and CI. See:
- [Google Bash Style Guide](https://google.github.io/styleguide/shellguide)
- [ShellCheck Wiki](https://github.com/koalaman/shellcheck/wiki)
#### Comments
For functions:
- All functions greater than 5 lines must have a comment
- Always follow the format described in
[Google Bash Style Guide: Function Comments](https://google.github.io/styleguide/shellguide#function-comments)
#### Script Header Requirements (MANDATORY)
Every Bash script must begin with a standardized header block, formatted as follows:
```bash
#!/bin/bash
#==============================================================================
# script_name.sh
# <script_name>.sh
#==============================================================================
#
# DESCRIPTION:
# Brief description of what the script does.
# Additional details if needed.
# <Detailed description of the script's purpose and functionality>
#
# USAGE:
# ./scripts/script_name.sh [options]
# <script_name>.sh <command> [args]
#
# OPTIONS: (if applicable)
# List of command-line options and their descriptions
# COMMANDS:
# <command_1> <Description of command_1>
# <command_2> <Description of command_2>
# ...
#
# OPTIONS:
# -h, --help Show this help message
# ... <Other options and their descriptions>
#
# DEPENDENCIES:
# - List of required tools and commands
# <List required dependencies, e.g. external tools, environment variables>
#==============================================================================
```
Every script should include this header format at the top, with all sections
filled out appropriately. The header provides:
Checklist for script headers:
- Clear identification of the script
- Description of its purpose and functionality
- Script name and clear identification
- Detailed description
- Usage instructions and examples
- Documentation of command-line options (if any)
- List of required dependencies
- Command and option documentation
- Required dependencies
All new and updated scripts must comply with this format. Non-compliant scripts will be flagged in code review and CI.
#### Script Testing
All scripts must have corresponding tests in the `scripts/tests` directory using
the common test library:
All scripts must have corresponding tests in the `tests` sub-directory using the common test
library:
1. **Test File Structure**
- Name test files as `<script_name>_test.sh`
- Place in `scripts/tests` directory
- Place in a `tests` direct sub-directory to the script under test
- Make test files executable (`chmod +x`)
- Source the common test library (`test_lib.sh`)
2. **Common Test Library** The `test_lib.sh` library provides a standard test
framework. See the `scripts/tests/template_test.sh` for examples of how to
set up one.
3. **Test Organization**
2. **Test Organization**
- Group related test cases into sections
- Test each command/flag combination
- Test error conditions explicitly
4. **Test Coverage**
3. **Test Coverage**
- Test error conditions
- Test input validation
- Test edge cases
- Test each supported flag/option
5. **CI Integration**
4. **CI Integration**
- Tests run automatically in CI
- Tests must pass before merge
- Test execution is part of the validate-scripts job
@ -680,130 +719,23 @@ the common test library:
##### Test Framework Architecture Pattern
The improved test framework follows this standardized pattern for all script
tests:
**Test File Template:**
```bash
#!/bin/bash
#==============================================================================
# script_name_test.sh
#==============================================================================
#
# DESCRIPTION:
# Test suite for script_name.sh functionality.
# Brief description of what aspects are tested.
#
# USAGE:
# script_name_test.sh [OPTIONS]
#
# OPTIONS:
# -v, --verbose Enable verbose test output
# --stop-on-failure Stop on first test failure
# -h, --help Show this help message
#
#==============================================================================
# Set up the script path we want to test
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
export SCRIPT_PATH="$SCRIPT_DIR/../script_name.sh"
# Source the test framework
source "$SCRIPT_DIR/test_lib.sh"
# Define test functions
run_tests() {
test_section "Help and Usage"
test_case "shows help message" \
"--help" \
"USAGE:" \
true
test_case "shows error for invalid option" \
"--invalid-option" \
"Unknown option" \
false
test_section "Core Functionality"
# Add more test cases here
}
# Start the test framework and run tests
start_tests "$@"
run_tests
```
All tests must start with `scripts/template_test.sh`
**Key Framework Features:**
- **SCRIPT_PATH Setup**: Test files must set `SCRIPT_PATH` before sourcing
`test_lib.sh` to avoid variable conflicts
- **Function-based Test Organization**: Tests are organized in a `run_tests()`
function called after framework initialization
- **Consistent Test Sections**: Use `test_section` to group related tests with
descriptive headers
- **Standard Test Case Pattern**:
`test_case "name" "args" "expected_output" "should_succeed"`
- **Framework Integration**: Call `start_tests "$@"` before running tests to
handle argument parsing and setup
##### Script Argument Parsing Pattern
All scripts should implement consistent argument parsing following this pattern:
```bash
main() {
# Parse command line arguments first
while [[ $# -gt 0 ]]; do
case $1 in
-v | --verbose)
export VERBOSE=true
;;
-h | --help)
cat << 'EOF'
USAGE:
script_name.sh [OPTIONS]
DESCRIPTION:
Brief description of what the script does.
Additional details if needed.
OPTIONS:
-v, --verbose Enable verbose output
-h, --help Show this help message
EOF
exit 0
;;
*)
echo "Unknown option: $1" >&2
echo "Use --help for usage information." >&2
exit 1
;;
esac
shift
done
# Script main logic here
}
main "$@"
```
**Key Argument Parsing Features:**
- **Consistent Options**: All scripts support `-v/--verbose` and `-h/--help`
- **Early Help Exit**: Help is displayed immediately without running script
logic
- **Error Handling**: Unknown options produce helpful error messages
- **Inline Help Text**: Help is embedded in the script using heredoc syntax
- **SCRIPT_PATH Setup**: Test files must set `SCRIPT_PATH` before sourcing `test_lib.sh` to avoid
variable conflicts
- **Function-based Test Organization**: Tests are organized in a `run_tests()` function called after
framework initialization
- **Consistent Test Sections**: Use `test_section` to group related tests with descriptive headers
- **Standard Test Case Pattern**: `test_case "name" "args" "expected_output" "should_succeed"`
- **Framework Integration**: Call `start_tests "$@"` before running tests to handle argument parsing
and setup
##### Centralized Configuration Management
The project implements centralized version management using the `.env` file as a
single source of truth:
The project implements centralized version management using the `.env` file as a single source of
truth:
**Configuration Structure:**
@ -840,208 +772,10 @@ jobs:
**Synchronization Script Pattern:**
- `scripts/sync_go_version.sh` reads `.env` and updates `go.mod` accordingly
- Ensures consistency between environment configuration and Go module
requirements
- `scripts/dev/sync_go_version.sh` reads `.env` and updates `go.mod` accordingly
- Ensures consistency between environment configuration and Go module requirements
- Can be extended for other configuration synchronization needs
##### Implementation Status
**✅ Implemented Patterns:**
The following scripts have been updated with the standardized patterns:
1. **scripts/export_version.sh** - Complete implementation:
- ✅ Argument parsing with `--help` and `--verbose`
- ✅ Proper error handling and logging
- ✅ Comprehensive test suite in `scripts/tests/export_version_test.sh`
- ✅ Function-based test organization
2. **scripts/setup_dev.sh** - Complete implementation:
- ✅ Argument parsing with `--help` and `--verbose`
- ✅ Script-specific help documentation
- ✅ Error handling for unknown options
- ✅ Comprehensive test suite in `scripts/tests/setup_dev_test.sh`
- ✅ Function-based test organization
3. **scripts/tests/test_lib.sh** - Framework improvements:
- ✅ Reliable library loading with fallback paths
- ✅ Safe SCRIPT_PATH variable handling
- ✅ Arithmetic operations compatible with `set -e`
- ✅ Proper script name detection
- ✅ Lazy temporary directory initialization
- ✅ Comprehensive documentation and architecture notes
4. **Centralized Configuration Management**:
- ✅ `.env` file as single source of truth for versions
- ✅ GitHub Actions CI integration with version propagation
- ✅ `scripts/sync_go_version.sh` for configuration synchronization
**🔄 Remaining Scripts to Update:**
These scripts need the same pattern implementations:
- `scripts/distribute.sh` - Needs argument parsing and testing
- `scripts/update_md_tocs.sh` - Needs argument parsing and testing
- `scripts/check_and_fix_env.sh` - Needs argument parsing and testing
- `scripts/template.sh` - Needs argument parsing and testing
- `scripts/menu.sh` - Needs argument parsing and testing
Example script structure:
```bash
#!/bin/bash
#==============================================================================
# fix_and_update.sh
#==============================================================================
#
# DESCRIPTION:
# Runs lint fixes and checks for UTF-8 formatting issues in the project.
# Intended to help maintain code quality and formatting consistency.
#
# USAGE:
# ./scripts/fix_and_update.sh
#
# OPTIONS:
# -h, --help Show this help message
#
# DEPENDENCIES:
# - trunk (for linting)
# - bash
# - ./scripts/check_utf8.sh
#==============================================================================
# Resolves to absolute path and loads library
source "$(cd "$(dirname "$0")" && pwd)/lib.sh"
main_menu() {
if false; then
show_help # Uses the script header to output usage message
fi
}
# ...
# Script logic, variables and functions.
# ...
# Parse common command line arguments and hand the remaining to the script
remaining_args=$(parse_common_args "$@")
# Run main menu
main_menu
```
### YAML Files
YAML files in the project (GitHub Actions workflows, configuration files, etc.)
should follow these best practices:
#### Quoting Guidelines
- **Avoid unnecessary quotes** - YAML values don't need quotes unless they
contain special characters
- **Use quotes when required**:
- Values containing spaces: `version: "test version with spaces"`
- Empty strings: `packages: ""`
- Values starting with special characters: `value: "@special"`
- Boolean-like strings that should be treated as strings: `value: "true"` (if
you want the string "true", not boolean)
- Numeric-like strings: `version: "1.0"` (if you want string "1.0", not number
1.0)
#### Examples
**Good - No unnecessary quotes**:
```yaml
name: Test Action
on: workflow_dispatch
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: ./
with:
packages: curl wget
version: test-1.0
debug: true
```
**Avoid - Unnecessary quotes**:
```yaml
name: "Test Action"
on: "workflow_dispatch"
jobs:
test:
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v4"
- uses: "./"
with:
packages: "curl wget"
version: "test-1.0"
debug: "true"
```
**Good - Quotes when needed**:
```yaml
# Quotes required for values with spaces
version: "test version with spaces"
# Quotes required for empty strings
packages: ""
# No quotes needed for simple values
debug: true
timeout: 300
name: test-job
```
#### Formatting Guidelines
- Use 2-space indentation consistently
- Keep lines under 120 characters when possible
- Use `|` for multi-line strings that need line breaks preserved
- Use `>` for multi-line strings that should be folded
- Align nested items consistently
- Use meaningful names for job IDs and step IDs (use kebab-case)
#### Multi-line Strings
```yaml
# For scripts that need line breaks preserved
run: |
echo "Line 1"
echo "Line 2"
if [[ condition ]]; then
echo "Line 3"
fi
# For long descriptions that should be folded
description: >
This is a very long description that
will be folded into a single line
when parsed by YAML.
# For package lists (GitHub Actions input)
packages: |
curl
wget
jq
```
#### GitHub Actions Specific
- Use unquoted boolean values: `required: true`, `debug: false`
- Use unquoted numeric values: `timeout-minutes: 30`
- Quote version strings that might be interpreted as numbers: `version: "1.0"`
- Use kebab-case for input/output names: `cache-hit`, `package-version-list`
- Use meaningful step IDs: `test-basic-install`, `verify-cache-hit`
## Testing Principles
### 1. Test Organization Strategy
@ -1049,8 +783,8 @@ packages: |
We established a balanced approach to test organization:
- Use table-driven tests for simple, repetitive cases without introducing logic
- Use individual test functions for cases that require specific Arrange, Act,
Assert steps that cannot be shared amongst other cases
- Use individual test functions for cases that require specific Arrange, Act, Assert steps that
cannot be shared amongst other cases
- Group related test cases that operate on the same API method / function
### 2. Code Structure
@ -1069,8 +803,8 @@ var (
)
```
- Define constants for fixed values where the presence and format is only needed
and the value content itself does not affect the behavior under test
- Define constants for fixed values where the presence and format is only needed and the value
content itself does not affect the behavior under test
- Use variables for reusable test data
- Group related constants and variables together
- Do not prefix constants or variables with `test`
@ -1091,8 +825,7 @@ func assertValidJSON(t *testing.T, data string) {
}
```
Example of using functions to abstract away details not relevant to the behavior
under test
Example of using functions to abstract away details not relevant to the behavior under test
```go
type Item struct {
@ -1153,12 +886,11 @@ func TestAddPrefixToDescription_WithValidInput_AddsPrefix(t *testing.T) {
}
```
- Create helper functions to reduce duplication and keep tests focused on the
arrangement inputs and how they correspond to the expected output
- Create helper functions to reduce duplication and keep tests focused on the arrangement inputs
and how they correspond to the expected output
- Use `t.Helper()` for proper test failure reporting
- Keep helpers focused and single-purpose
- Helper functions that require logic should go into their own file and have
tests
- Helper functions that require logic should go into their own file and have tests
### 3. Test Case Patterns
@ -1281,11 +1013,9 @@ func assertBalanceEquals(t *testing.T, expected, actual decimal.Decimal) {
- Use descriptive test name arrangement and expectation parts
- Use test name formats in a 3 part structure
- `Test<function>_<arrangement>_<expectation>` for free functions, and
- `Test<interface><function>_<arrangement>_<expectation>` for interface
functions.
- `Test<interface><function>_<arrangement>_<expectation>` for interface functions.
- The module name is inferred
- Treat the first part as either the type function or the free function
under test
- Treat the first part as either the type function or the free function under test
```go
func Test<[type]<function>>_<arrangement>_<expectation>(t *testing.T) {
@ -1457,5 +1187,5 @@ These improvements make the test code:
- More reliable
- More efficient to extend
The patterns and principles can be applied across different types of tests to
create a consistent and effective testing strategy.
The patterns and principles can be applied across different types of tests to create a consistent
and effective testing strategy.

View file

@ -1,6 +1,7 @@
# Command Line Usage Guide
This document provides information about using the `cache-apt-pkgs` command line tool.
This document provides information about using the `cache-apt-pkgs` command line
tool.
## Basic Usage
@ -23,7 +24,8 @@ cache-apt-pkgs install [flags] [packages]
#### Flags for Install
- `--version`: Cache version identifier (optional)
- `--execute-scripts`: Execute package install scripts (optional, default: false)
- `--execute-scripts`: Execute package install scripts (optional, default:
false)
#### Install Examples
@ -74,7 +76,8 @@ cache-apt-pkgs restore [flags] [packages]
#### Flags for Restore
- `--version`: Cache version to restore from (optional)
- `--execute-scripts`: Execute package install scripts (optional, default: false)
- `--execute-scripts`: Execute package install scripts (optional, default:
false)
#### Restore Examples

View file

@ -44,11 +44,11 @@ version 2.0 once in beta.
```bash
# Interactive menu for all development tasks
./scripts/menu.sh
./scripts/dev/menu.sh
# Or use individual scripts directly:
./scripts/setup_dev.sh # Set up development environment
./scripts/update_md_tocs.sh # Update table of contents in markdown files
./scripts/dev/setup_dev.sh # Set up development environment
./scripts/dev/update_md_tocs.sh # Update table of contents in markdown files
```
### 📜 Available Development Scripts
@ -71,7 +71,7 @@ The project includes several utility scripts to help with development:
To access the menu system, run:
```bash
./scripts/menu.sh
./scripts/dev/menu.sh
```
This will present an interactive menu with all available development tasks.
@ -118,6 +118,7 @@ There are two ways to test the GitHub Action workflows:
- 🎭 Install [`act`](https://github.com/nektos/act) for local GitHub Actions
testing:
- ▶️ Run `act` on any action test in the following ways:
```bash
@ -177,6 +178,7 @@ There are two ways to test the GitHub Action workflows:
- Install [`act`](https://github.com/nektos/act) for local GitHub Actions
testing:
- Run `act` on any action test in the following ways:
```bash

231
README.md
View file

@ -9,41 +9,30 @@
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Cache APT Packages Action](#cache-apt-packages-action)
- [🚀 Quick Start](#-quick-start)
- [✨ Features](#-features)
- [📋 Requirements](#-requirements)
- [🔧 Configuration](#-configuration)
- [Inputs](#inputs)
- [Outputs](#outputs)
- [📝 Usage Guide](#-usage-guide)
- [Version Selection](#version-selection)
- [Basic Example](#basic-example)
- [Advanced Example](#advanced-example)
- [🔍 Cache Details](#-cache-details)
- [Cache Scoping](#cache-scoping)
- [Cache Keys](#cache-keys)
- [Cache Invalidation](#cache-invalidation)
- [🚨 Common Issues](#-common-issues)
- [Permission Issues](#permission-issues)
- [Missing Dependencies](#missing-dependencies)
- [Cache Misses](#cache-misses)
- [🤝 Contributing](#-contributing)
- [📜 License](#-license)
- [🌟 Acknowledgements](#-acknowledgements)
- [Getting Started](#getting-started)
- [Workflow Setup](#workflow-setup)
- [Detailed Configuration](#detailed-configuration)
- [Input Parameters](#input-parameters)
- [Output Values](#output-values)
- [Cache scopes](#cache-scopes)
- [Example workflows](#example-workflows)
- [Build and Deploy `Doxygen` Documentation](#build-and-deploy-doxygen-documentation)
- [Simple Package Installation](#simple-package-installation)
- [Caveats](#caveats)
- [Edge Cases](#edge-cases)
- [Non-file Dependencies](#non-file-dependencies)
- [Cache Limits](#cache-limits)
- [🚀 Quick Start](#-quick-start)
- [✨ Features](#-features)
- [📋 Requirements](#-requirements)
- [🔧 Configuration](#-configuration)
- [Inputs](#inputs)
- [Outputs](#outputs)
- [📝 Usage Guide](#-usage-guide)
- [Version Selection](#version-selection)
- [Example Workflows](#example-workflows)
- [🔍 Cache Details](#-cache-details)
- [Cache Scoping](#cache-scoping)
- [Cache Keys](#cache-keys)
- [Cache Invalidation](#cache-invalidation)
- [🚨 Common Issues](#-common-issues)
- [Permission Issues](#permission-issues)
- [Missing Dependencies](#missing-dependencies)
- [Cache Misses](#cache-misses)
- [🤝 Contributing](#-contributing)
- [📜 License](#-license)
- [Caveats](#caveats)
- [Edge Cases](#edge-cases)
- [Non-file Dependencies](#non-file-dependencies)
- [Cache Limits](#cache-limits)
- [🌟 Acknowledgements](#-acknowledgements)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
@ -103,11 +92,10 @@ steps:
### Version Selection
> ⚠️ Starting with this release, the action enforces immutable references.
> Workflows must pin `awalsh128/cache-apt-pkgs-action` to a release tag or
> commit SHA. Referencing a branch (for example `@main`) will now fail during
> the `setup` step. For more information on blocking and SHA pinning actions,
> see the
> ⚠️ The action enforces immutable references. Workflows must pin
> `awalsh128/cache-apt-pkgs-action` to a release tag or commit SHA. Referencing
> a branch (for example `@main`) will now fail during the `setup` step. For more
> information on blocking and SHA pinning actions, see the
> [announcement on the GitHub changelog](https://github.blog/changelog/2025-08-15-github-actions-policy-now-supports-blocking-and-sha-pinning-actions).
Recommended options:
@ -119,7 +107,9 @@ Avoid floating references such as `@latest`, `@master`, or `@dev`. The action
will refuse to run when a branch reference is detected to protect consumers from
involuntary updates.
### Basic Example
### Example Workflows
Install a set of packages and build your code.
```yaml
name: Build
@ -143,51 +133,34 @@ jobs:
make
```
### Advanced Example
Install `Doxygen` dependencies for building and deploying documentation.
```yaml
name: Complex Build
on: [push]
name: Create Documentation
on: push
jobs:
build:
build_and_deploy_docs:
runs-on: ubuntu-latest
name: Build Doxygen documentation and deploy
steps:
- uses: actions/checkout@v3
- name: Cache APT Packages
uses: awalsh128/cache-apt-pkgs-action@v2
id: apt-cache
- uses: actions/checkout@v4
- uses: awalsh128/cache-apt-pkgs-action@latest
with:
packages: python3-dev cmake libboost-all-dev
version: ${{ github.sha }}
execute_install_scripts: true
packages: dia doxygen doxygen-doc doxygen-gui doxygen-latex graphviz mscgen
version: 1.0
- name: Cache Info
- name: Build
run: |
echo "Cache hit: ${{ steps.apt-cache.outputs.cache-hit }}"
echo "Installed packages: ${{ steps.apt-cache.outputs.package-version-list }}"
cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}}
cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}}
- name: Deploy
uses: JamesIves/github-pages-deploy-action@4.1.5
with:
branch: gh-pages
folder: ${{github.workspace}}/build/website
```
### Binary Integrity Verification
Every published release bundles precompiled binaries under
`distribute/<runner arch>/cache_apt_pkgs`. Starting with this release the action
verifies the binary against a co-located `.sha256` manifest before execution. If
the checksum does not match the expected value the `setup` step exits with an
error to prevent tampering or incomplete releases.
When preparing a new release:
1. Run `scripts/distribute.sh push` to build architecture-specific binaries.
2. The script now emits a matching `cache-apt-pkgs-linux-<arch>.sha256` file for
each binary.
3. Copy the binaries and checksum files into `distribute/<arch>/` before
creating the release artifact.
Workflows do not need to perform any additional setup—the checksum enforcement
is automatic as long as the bundled `.sha256` files accompany the binaries.
## 🔍 Cache Details
### Cache Scoping
@ -245,106 +218,6 @@ for details.
This project is licensed under the Apache License 2.0 - see the
[LICENSE](LICENSE) file for details.
## 🌟 Acknowledgements
- [actions/cache](https://github.com/actions/cache/) team
- All our
[contributors](https://github.com/awalsh128/cache-apt-pkgs-action/graphs/contributors)
### Getting Started
#### Workflow Setup
Create a workflow `.yml` file in your repository's `.github/workflows`
directory. [Example workflows](#example-workflows) are available below. For more
information, reference the GitHub Help Documentation for
[Creating a workflow file](https://help.github.com/en/articles/configuring-a-workflow#creating-a-workflow-file).
#### Detailed Configuration
##### Input Parameters
- `packages` - Space delimited list of packages to install.
- `version` - Version of cache to load. Each version will have its own cache.
Note, all characters except spaces are allowed.
- `execute_install_scripts` - Execute Debian package 'preinst' and 'postinst'
install scripts upon restore. See
[Caveats / Non-file Dependencies](#non-file-dependencies) for more
information.
##### Output Values
- `cache-hit` - A `true` or `false` value to indicate a cache was found for the
packages requested.
- `package-version-list` - The main requested packages and versions that are
installed. Represented as a comma delimited list with equals delimit on the
package version (i.e. \<package1\>=\<version1\>,\<package2\>=\<version2\>,...).
- `all-package-version-list` - All the pulled in packages and versions,
including dependencies, that are installed. Represented as a comma delimited
list with equals delimit on the package version (i.e.
\<package1\>=\<version1\>,\<package2\>=\<version2\>,...).
### Cache scopes
The cache is scoped to:
- Package list and versions
- Branch settings
- Default branch cache (available to other branches)
### Example workflows
Below are some example workflows showing how to use this action.
#### Build and Deploy `Doxygen` Documentation
This example shows how to cache dependencies for building and deploying
`Doxygen` documentation:
```yaml
name: Create Documentation
on: push
jobs:
build_and_deploy_docs:
runs-on: ubuntu-latest
name: Build Doxygen documentation and deploy
steps:
- uses: actions/checkout@v4
- uses: awalsh128/cache-apt-pkgs-action@latest
with:
packages: dia doxygen doxygen-doc doxygen-gui doxygen-latex graphviz mscgen
version: 1.0
- name: Build
run: |
cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}}
cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}}
- name: Deploy
uses: JamesIves/github-pages-deploy-action@4.1.5
with:
branch: gh-pages
folder: ${{github.workspace}}/build/website
```
#### Simple Package Installation
This example shows the minimal configuration needed to cache and install
packages:
```yaml
name: Install Dependencies
jobs:
install_doxygen_deps:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: awalsh128/cache-apt-pkgs-action@latest
with:
packages: dia doxygen doxygen-doc doxygen-gui doxygen-latex graphviz mscgen
version: 1.0
```
## Caveats
### Edge Cases
@ -404,3 +277,9 @@ caches will be evicted based on when the cache was last accessed. Caches that
are not accessed within the last week will also be evicted. To get more
information on how to access and manage your actions's caches, see
[GitHub Actions / Using workflows / Cache dependencies](https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#viewing-cache-entries).
## 🌟 Acknowledgements
- [actions/cache](https://github.com/actions/cache/) team
- All our
[contributors](https://github.com/awalsh128/cache-apt-pkgs-action/graphs/contributors)

View file

@ -1,32 +1,32 @@
name: Cache APT Packages
description: Install APT based packages and cache them for future runs.
author: awalsh128
branding:
icon: hard-drive
color: green
inputs:
packages:
description: Space delimited list of packages to install. Version can be specified optionally using APT command syntax of <name>=<version> (e.g. xdot=1.2-2).
description: >
Space delimited list of packages to install. Version can be specified optionally using APT command syntax of <name>=<version> (e.g. xdot=1.2-2).
required: true
default: ""
version:
description: Version of cache to load. Each version will have its own cache. Note, all characters except spaces are allowed.
description: >
Version of cache to load. Each version will have its own cache. Note, all characters except spaces are allowed.
required: false
default: ""
execute_install_scripts:
description: Execute Debian package pre and post install script upon restore. See README.md caveats for more information.
description: >
Execute Debian package pre and post install script upon restore. See README.md caveats for more information.
required: false
default: "false"
refresh:
description: "OBSOLETE: Refresh is not used by the action, use version instead."
deprecationMessage: Refresh is not used by the action, use version instead.
debug:
description: Enable debugging when there are issues with action. Minor performance penalty.
required: false
default: "false"
outputs:
cache-hit:
description: A boolean value to indicate a cache was found for the packages requested.
@ -34,58 +34,74 @@ outputs:
# Need to output true and false instead of true and nothing.
value: ${{ steps.load-cache.outputs.cache-hit || false }}
package-version-list:
description: The main requested packages and versions that are installed. Represented as a comma delimited list with equals delimit on the package version (i.e. <package>:<version,<package>:<version>).
description: >
The main requested packages and versions that are installed. Represented as a comma delimited list with equals delimit on the package version (i.e. <package>:<version>,<package>:<version>).
value: ${{ steps.install-pkgs.outputs.package-version-list || steps.restore-pkgs.outputs.package-version-list }}
all-package-version-list:
description: All the pulled in packages and versions, including dependencies, that are installed. Represented as a comma delimited list with equals delimit on the package version (i.e. <package>:<version,<package>:<version>).
value: ${{ steps.install-pkgs.outputs.all-package-version-list || steps.restore-pkgs.outputs.all-package-version-list }}
description: >
All the pulled in packages and versions, including dependencies, that are installed. Represented as a comma delimited list with equals delimit on the package version (i.e. <package>:<version>,<package>:<version>).
value: ${{ steps.install-pkgs.outputs.all-package-version-list || steps.restore-pkgs.outputs.all-package-version-list }}
runs:
using: composite
steps:
- id: set-env
shell: bash
run: |
vars=( \
"BINARY_PATH=${{ github.action_path }}/distribute/${{ runner.arch }}/cache_apt_pkgs" \
"CACHE_DIR=~/cache-apt-pkgs" )
for var in "${vars[@]}"; do echo "$var" >> "$GITHUB_ENV"; done
- id: setup
shell: bash
run: |
${{ github.action_path }}/distribute/${{ runner.arch }}/cache_apt_pkgs setup \
--binary-path ${{ github.action_path }}/distribute/${{ runner.arch }}/cache_apt_pkgs
--checksum-file ${{ github.action_path }}/distribute/${{ runner.arch }}/cache_apt_pkgs.sha256
run: scripts/setup.sh "$BINARY_PATH" "$BINARY_PATH.sha256"
- id: create-cache-key
shell: bash
env:
VERSION: ${{ inputs.version }}
PACKAGES: ${{ inputs.packages }}
OS_ARCH: ${{ runner.arch }}
run: |
${{ github.action_path }}/distribute/${{ runner.arch }}/cache_apt_pkgs createkey \
-os-arch ${{ runner.arch }} \
-cache-dir ~/cache-apt-pkgs \
-version "${{ inputs.version }}" \
-global-version "20250910" \
${{ inputs.packages }}
$BINARY_PATH createkey \
--os-arch $OS_ARCH \
--cache-dir "$CACHE_DIR" \
--version "$VERSION" \
--global-version "20250910" \
${PACKAGES}
- id: load-cache
uses: actions/cache/restore@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
with:
path: ~/cache-apt-pkgs
path: "$CACHE_DIR"
key: cache-apt-pkgs_${{ steps.create-cache-key.outputs.cache-key }}
- id: restore-pkgs
if: ${{ steps.load-cache.outputs.cache-hit == 'true' }}
env:
INPUTS_PACKAGES: ${{ inputs.packages }}
shell: bash
run: |
${{ github.action_path }}/distribute/${{ runner.arch }}/cache_apt_pkgs restore \
--cache-dir ~/cache-apt-pkgs \
$BINARY_PATH restore \
--cache-dir "$CACHE_DIR" \
--restore-root "/" \
${{ inputs.packages }}
${INPUTS_PACKAGES}
- id: install-pkgs
if: ${{ steps.load-cache.outputs.cache-hit != 'true' }}
env:
INPUTS_VERSION: ${{ inputs.version }}
INPUT_PACKAGES: ${{ inputs.packages }}
shell: bash
run: |
${{ github.action_path }}/distribute/${{ runner.arch }}/cache_apt_pkgs install \
--cache-dir ~/cache-apt-pkgs \
--version "${{ inputs.version }}" \
$BINARY_PATH install \
--cache-dir "$CACHE_DIR" \
--version "$INPUTS_VERSION" \
--global-version "20250910" \
${{ inputs.packages }}
$INPUT_PACKAGES
- id: upload-artifacts
if: ${{ inputs.debug == 'true' }}
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: cache-apt-pkgs-logs_${{ steps.create-cache-key.outputs.cache-key }}
path: ~/cache-apt-pkgs/*.log
path: $CACHE_DIR/*.log
- id: save-cache
if: ${{ ! steps.load-cache.outputs.cache-hit }}
uses: actions/cache/save@v4
@ -94,6 +110,4 @@ runs:
key: ${{ steps.load-cache.outputs.cache-primary-key }}
- id: clean-cache
shell: bash
run: |
${{ github.action_path }}/distribute/${{ runner.arch }}/cache_apt_pkgs cleanup \
--cache-dir ~/cache-apt-pkgs
run: rm -fr "$CACHE_DIR"

View file

@ -1,212 +0,0 @@
// Package main implements the cache-apt-pkgs command line tool.
// It provides functionality to cache and restore APT packages in GitHub Actions,
// with commands for creating cache keys, installing packages, and restoring from cache.
package main
import (
	"flag"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strings"

	"awalsh128.com/cache-apt-pkgs-action/internal/logging"
	"awalsh128.com/cache-apt-pkgs-action/internal/pkgs"
)
// ExamplePackages provides a set of sample packages used for testing and
// documentation; NewCmd attaches it to every command it builds.
// It includes rolldice, xdot pinned to version 1.1-2, and libgtk-3-dev.
var ExamplePackages = pkgs.NewPackages(
	pkgs.Package{Name: "rolldice"},
	pkgs.Package{Name: "xdot", Version: "1.1-2"},
	pkgs.Package{Name: "libgtk-3-dev"},
)
// binaryName is the base name of the command executable (derived from
// os.Args[0] at startup), used in usage and error messages.
var binaryName = filepath.Base(os.Args[0])
// globalFlags defines the command-line flags that apply to all commands:
// verbose/help, each with a single-letter shorthand. The bool targets are
// throwaway values (new(bool)); the values are not read through this set.
// NOTE(review): NewCmd copies these flags into each command's flag set via
// flags.Var(f.Value, ...), which shares the same underlying flag.Value across
// all commands — confirm that shared global-flag state is intended.
var globalFlags = func() *flag.FlagSet {
	flags := flag.NewFlagSet("global", flag.ExitOnError)
	flags.BoolVar(new(bool), "verbose", false, "Enable verbose logging")
	flags.BoolVar(new(bool), "v", false, "Enable verbose logging (shorthand)")
	flags.BoolVar(new(bool), "help", false, "Show help")
	flags.BoolVar(new(bool), "h", false, "Show help (shorthand)")
	return flags
}()
// usage prints top-level help for the binary to stderr: the overall usage
// line, the registered commands with their descriptions, the global flags,
// and a pointer to per-command help.
//
// Commands are printed in sorted name order; ranging over the map directly
// (as the previous implementation did) yields a nondeterministic ordering
// that changes between runs.
func (c *Cmds) usage() {
	fmt.Fprintf(os.Stderr, "usage: %s <command> [flags] [packages]\n\n", binaryName)
	fmt.Fprintf(os.Stderr, "commands:\n")
	names := make([]string, 0, len(*c))
	for name := range *c {
		names = append(names, name)
	}
	sort.Strings(names)
	for _, name := range names {
		cmd := (*c)[name]
		fmt.Fprintf(os.Stderr, " %s: %s\n", cmd.Name, cmd.Description)
	}
	fmt.Fprintf(os.Stderr, "\nflags:\n")
	// Print global flags once; they are identical across commands.
	globalFlags.VisitAll(func(f *flag.Flag) {
		fmt.Fprintf(os.Stderr, " -%s: %s\n", f.Name, f.Usage)
	})
	fmt.Fprintf(os.Stderr, "\nUse \"%s <command> --help\" for more information about a command.\n", binaryName)
}
// Cmd represents a command-line subcommand with its associated flags and behavior.
// Each command has a name, description, set of flags, and a function to execute the command.
type Cmd struct {
	// Name is the command identifier used in CLI arguments
	Name string
	// Description explains what the command does
	Description string
	// Flags contains the command-specific command-line flags
	Flags *flag.FlagSet
	// Run executes the command with the given packages and returns any errors
	Run func(cmd *Cmd, pkgArgs pkgs.Packages) error
	// Examples are sample argument strings printed in the command's usage
	// output as "<binary> <name> <example>"
	Examples []string
	// ExamplePackages holds sample packages for documentation; NewCmd sets
	// this to the package-level ExamplePackages value
	ExamplePackages pkgs.Packages
}
// NewCmd creates a new command with the given name, description, examples,
// and run function. Every global flag is mirrored onto the command's flag
// set, and a usage function is installed that prints the command's flags
// followed by its example invocations. The returned Cmd is ready to be
// registered as a subcommand in the CLI.
func NewCmd(name, description string, examples []string, runFunc func(cmd *Cmd, pkgArgs pkgs.Packages) error) *Cmd {
	fs := flag.NewFlagSet(name, flag.ExitOnError)

	// Copy the global flags onto this command's flag set. The underlying
	// flag.Value is shared with globalFlags.
	globalFlags.VisitAll(func(gf *flag.Flag) {
		fs.Var(gf.Value, gf.Name, gf.Usage)
	})

	printUsage := func() {
		fmt.Fprintf(os.Stderr, "usage: %s %s [flags] [packages]\n\n%s\n\n", binaryName, name, description)
		fmt.Fprintf(os.Stderr, "flags:\n")
		fs.VisitAll(func(f *flag.Flag) {
			fmt.Fprintf(os.Stderr, " -%s: %s\n", f.Name, f.Usage)
		})
		fmt.Fprintf(os.Stderr, "\nexamples:\n")
		for _, ex := range examples {
			fmt.Fprintf(os.Stderr, " %s %s %s\n", binaryName, name, ex)
		}
	}
	fs.Usage = printUsage

	return &Cmd{
		Name:            name,
		Description:     description,
		Flags:           fs,
		Run:             runFunc,
		Examples:        examples,
		ExamplePackages: ExamplePackages,
	}
}
// StringFlag returns the current string value of the flag with the given
// name. The flag must already be registered on the command's flag set;
// looking up an unknown name returns nil and this method will panic.
func (c *Cmd) StringFlag(name string) string {
	f := c.Flags.Lookup(name)
	return f.Value.String()
}
// Cmds is a collection of subcommands indexed by their names.
// It provides methods for registering (Add), looking up (Get), and selecting
// a command from the process arguments (Parse); usage prints top-level help.
type Cmds map[string]*Cmd
// parseFlags processes the process-wide command line (os.Args[2:]) for this
// command: it requires at least one argument after the command name, parses
// the command's flag set, validates required flags, and parses the remaining
// positional arguments as packages.
//
// All failures go through logging.Fatalf, which terminates the process, so
// on return the parsed packages are always valid.
func (c *Cmd) parseFlags() pkgs.Packages {
	logging.Debug("Parsing flags for command %q with args: %v", c.Name, os.Args[2:])
	// os.Args[1] is the command name itself; at least one further argument
	// (a flag or a package) must follow.
	if len(os.Args) < 3 {
		logging.Fatalf("command %q requires arguments", c.Name)
	}
	// Parse the command line flags
	if err := c.Flags.Parse(os.Args[2:]); err != nil {
		logging.Fatalf("unable to parse flags for command %q: %v", c.Name, err)
	}
	// Check for missing required flags. A flag is treated as required when
	// its default value is empty, and as missing when its parsed value is
	// still empty. NOTE(review): this heuristic cannot distinguish an
	// explicitly supplied empty value from an omitted flag — confirm that is
	// acceptable for all command flags.
	missingFlagNames := []string{}
	c.Flags.VisitAll(func(f *flag.Flag) {
		// Skip all global flags since they are considered optional
		if gf := globalFlags.Lookup(f.Name); gf != nil {
			return
		}
		if f.DefValue == "" && f.Value.String() == "" {
			logging.Info("Missing required flag: %s", f.Name)
			missingFlagNames = append(missingFlagNames, f.Name)
		}
	})
	if len(missingFlagNames) > 0 {
		logging.Fatalf("missing required flags for command %q: %s", c.Name, missingFlagNames)
	}
	logging.Debug("Parsed flags successfully")
	// Parse the remaining (non-flag) arguments as package arguments
	pkgArgs, err := pkgs.ParsePackageArgs(c.Flags.Args())
	if err != nil {
		logging.Fatalf("failed to parse package arguments for command %q: %v", c.Name, err)
	}
	logging.Debug("Parsed package arguments:\n%s", strings.Join(c.Flags.Args(), "\n "))
	return pkgArgs
}
// Add registers cmd in the command set, keyed by its name.
// Returns an error if a command with the same name was already registered.
func (c *Cmds) Add(cmd *Cmd) error {
	set := *c
	if _, dup := set[cmd.Name]; dup {
		return fmt.Errorf("command %q already exists", cmd.Name)
	}
	set[cmd.Name] = cmd
	return nil
}
// Get retrieves a command by name.
// The boolean result reports whether a command with that name is registered;
// when it is false the returned command is nil.
func (c *Cmds) Get(name string) (*Cmd, bool) {
	if cmd, found := (*c)[name]; found {
		return cmd, true
	}
	return nil, false
}
// Parse processes the command line arguments to determine the command to run
// and its package arguments. Handles help requests and invalid commands.
// Returns the selected command and its parsed package arguments, or exits the
// process after printing usage to stderr on error.
func (c *Cmds) Parse() (*Cmd, pkgs.Packages) {
	if len(os.Args) < 2 {
		fmt.Fprintf(os.Stderr, "error: no command specified\n\n")
		c.usage()
		os.Exit(1)
	}
	cmdName := os.Args[1]
	if cmdName == "--help" || cmdName == "-h" {
		c.usage()
		os.Exit(0)
	}
	cmd, ok := c.Get(cmdName)
	if !ok {
		// BUG FIX: previously this printed binaryName, reporting the binary
		// itself as the unknown command instead of the bad argument.
		fmt.Fprintf(os.Stderr, "error: unknown command %q\n\n", cmdName)
		c.usage()
		os.Exit(1)
	}
	// Handle command-specific help: show the selected command's usage rather
	// than the global command list (previously c.usage(), which contradicted
	// this comment's intent).
	for _, arg := range os.Args[2:] {
		if arg == "--help" || arg == "-h" {
			cmd.Flags.Usage()
			os.Exit(0)
		}
	}
	pkgArgs := cmd.parseFlags()
	if pkgArgs == nil {
		fmt.Fprintf(os.Stderr, "error: no package arguments specified for command %q\n\n", cmd.Name)
		cmd.Flags.Usage()
		os.Exit(1)
	}
	return cmd, pkgArgs
}
// CreateCmds initializes a new command set containing the provided commands.
// Registering two commands with the same name is a programming error; the
// previous implementation silently discarded Add's error, dropping later
// duplicates. Panic instead so the mistake surfaces immediately.
func CreateCmds(cmd ...*Cmd) *Cmds {
	commands := &Cmds{}
	for _, c := range cmd {
		if err := commands.Add(c); err != nil {
			panic(err)
		}
	}
	return commands
}

View file

@ -1,177 +0,0 @@
package main
import (
"flag"
"os"
"testing"
"awalsh128.com/cache-apt-pkgs-action/internal/pkgs"
)
// Shared fixture values for the command tests: a flag-set name, one flag with
// its set and default values, and several distinct command names.
const (
	flagSetName      = "test_flag_set_name"
	flagName         = "test-flag"
	flagValue        = "test_flag_value"
	flagDefaultValue = "test_default_flag_value"
	cmdName          = "test-command-name"
	cmdName1         = "test-command-name1"
	cmdName2         = "test-command-name2"
)
// TestCmd_StringFlag verifies that StringFlag returns the value that was set
// on the flag rather than its default.
func TestCmd_StringFlag(t *testing.T) {
	cmd := &Cmd{
		Name:  cmdName,
		Flags: flag.NewFlagSet(flagSetName, flag.ContinueOnError),
	}
	cmd.Flags.String(flagName, flagDefaultValue, "test flag")

	// Set the flag value directly instead of parsing argv.
	cmd.Flags.Set(flagName, flagValue)

	result := cmd.StringFlag(flagName)
	if result != flagValue {
		// BUG FIX: the failure message previously hard-coded 'custom-value',
		// which never matched the expected flagValue constant.
		t.Errorf("Expected '%s', got '%s'", flagValue, result)
	}
}
// TestCmds_Add checks that a command registers once and that a second
// registration under the same name is rejected.
func TestCmds_Add(t *testing.T) {
	cmds := &Cmds{}
	*cmds = make(map[string]*Cmd)
	cmd := &Cmd{Name: "test"}

	if err := cmds.Add(cmd); err != nil {
		t.Errorf("Unexpected error adding command: %v", err)
	}

	// Re-adding the identical command must fail.
	if err := cmds.Add(cmd); err == nil {
		t.Error("Expected error when adding duplicate command")
	}
}
// TestCmds_Get checks lookup of both a registered and an unregistered name.
func TestCmds_Get(t *testing.T) {
	cmds := &Cmds{}
	*cmds = make(map[string]*Cmd)
	cmds.Add(&Cmd{Name: cmdName})

	retrieved, found := cmds.Get(cmdName)
	if !found {
		t.Errorf("Expected to find command '%s'", cmdName)
	}
	if retrieved.Name != cmdName {
		t.Errorf("Expected command name '%s', got '%s'", cmdName, retrieved.Name)
	}

	if _, found := cmds.Get("nonexistent-cmd"); found {
		t.Error("Expected not to find command 'nonexistent-cmd'")
	}
}
// TestCreateCmds checks that CreateCmds registers every command it is given.
func TestCreateCmds(t *testing.T) {
	first := &Cmd{Name: cmdName1}
	second := &Cmd{Name: cmdName2}

	cmds := CreateCmds(first, second)
	if cmds == nil {
		t.Fatal("CreateCmds returned nil")
	}

	for _, name := range []string{cmdName1, cmdName2} {
		if _, found := cmds.Get(name); !found {
			t.Errorf("Expected to find %s", name)
		}
	}
}
// TestCmd_ParseFlags exercises the argument-handling conditions around
// Cmd.parseFlags and Cmds.Parse without invoking Parse itself, because Parse
// calls os.Exit on failure and would terminate the test process.
func TestCmd_ParseFlags(t *testing.T) {
	origArgs := os.Args
	defer func() { os.Args = origArgs }()

	t.Run("missing command", func(t *testing.T) {
		// Test the condition that would trigger the missing command error
		// without actually calling Parse() which would exit the test process
		os.Args = []string{binaryName}
		if len(os.Args) < 2 {
			t.Log("Successfully detected missing command condition")
		} else {
			t.Error("Expected os.Args to have fewer than 2 elements")
		}
	})

	const argExample = "test-package"
	const requiredFlagName = "required-flag"

	t.Run("missing required flags", func(t *testing.T) {
		// Parse() eventually calls os.Exit, so test the flag registration
		// logic directly instead.
		cmd := NewCmd(flagSetName, "Test command", []string{argExample}, func(cmd *Cmd, pkgArgs pkgs.Packages) error {
			return nil
		})
		cmd.Flags.String(requiredFlagName, "", "required flag description")

		// Verify the flag was registered with an empty default, which is what
		// parseFlags uses to decide a flag is required.
		requiredFlag := cmd.Flags.Lookup(requiredFlagName)
		if requiredFlag == nil {
			t.Error("Expected required-flag to be registered")
		}
		if requiredFlag.DefValue != "" {
			t.Error("Expected required-flag to have empty default value")
		}
	})

	t.Run("missing package arguments", func(t *testing.T) {
		// Test the condition without calling Parse()
		os.Args = []string{binaryName, cmdName}
		if len(os.Args) >= 2 {
			t.Log("Command name would be available, but package args would be missing")
		} else {
			t.Error("Expected at least 2 args for this test")
		}
	})

	const pkgArg1 = "test-package=1.1-beta"
	const pkgArg2 = "test-package=2.0"

	t.Run("valid command with packages", func(t *testing.T) {
		// Test argument parsing without calling the full Parse() method
		os.Args = []string{binaryName, cmdName, pkgArg1, pkgArg2}
		if len(os.Args) >= 4 {
			actualCmdName := os.Args[1]
			actualPkgArgs := os.Args[2:]
			// BUG FIX: previously compared against the literal "test", which
			// can never equal cmdName and made this subtest always fail.
			if actualCmdName != cmdName {
				t.Errorf("Expected command '%s', got %s", cmdName, actualCmdName)
			}
			if len(actualPkgArgs) != 2 {
				t.Errorf("Expected 2 package args, got %d", len(actualPkgArgs))
			}
		} else {
			t.Error("Expected at least 4 args for this test")
		}
	})

	t.Run("help flag detection", func(t *testing.T) {
		// Test help flag detection logic
		os.Args = []string{binaryName, "--help"}
		if len(os.Args) >= 2 {
			cmdName := os.Args[1]
			if cmdName == "--help" || cmdName == "-h" {
				t.Log("Successfully detected help flag")
			} else {
				t.Errorf("Expected help flag, got %s", cmdName)
			}
		}
	})
}

View file

@ -6,13 +6,13 @@ import (
"path/filepath"
"runtime"
"awalsh128.com/cache-apt-pkgs-action/cmd/cache_apt_pkgs/cmdflags"
"awalsh128.com/cache-apt-pkgs-action/internal/cache"
"awalsh128.com/cache-apt-pkgs-action/internal/logging"
"awalsh128.com/cache-apt-pkgs-action/internal/pkgs"
"github.com/sethvargo/go-githubactions"
)
func createKey(cmd *Cmd, pkgArgs pkgs.Packages) error {
func createKey(cmd *cmdflags.Cmd, pkgArgs pkgs.Packages) error {
key, err := cache.NewKey(
pkgArgs,
cmd.StringFlag("version"),
@ -28,7 +28,7 @@ func createKey(cmd *Cmd, pkgArgs pkgs.Packages) error {
cacheDir := cmd.StringFlag("cache-dir")
plaintextPath := filepath.Join(cacheDir, "cache_key.txt")
ciphertextPath := filepath.Join(cacheDir, "cache_key.md5")
ciphertextPath := filepath.Join(cacheDir, "cache_key.sha256")
if err := key.Write(
plaintextPath,
ciphertextPath); err != nil {
@ -37,22 +37,17 @@ func createKey(cmd *Cmd, pkgArgs pkgs.Packages) error {
logging.Info("Wrote cache key files:\n %s\n %s", plaintextPath, ciphertextPath)
// Output the cache key hash to GitHub Actions
if isGitHubActions() {
githubactions.SetOutput("cache-key", hashHex)
} else {
// In test/development environments, print to stdout
fmt.Printf("cache-key=%s\n", hashHex)
}
cmd.GhioPrinter.SetOutput("cache-key", hashHex)
return nil
}
func GetCreateKeyCmd() *Cmd {
func GetCreateKeyCmd() *cmdflags.Cmd {
examples := []string{
"--os-arch amd64 --cache-dir ~/cache_dir --version 1.0.0 --global-version 1",
"--os-arch x86_64 --cache-dir /tmp/cache_dir --version v2 --global-version 2",
}
cmd := NewCmd("createkey", "Create a cache key based on the provided options", examples, createKey)
cmd := cmdflags.NewCmd("createkey", "Create a cache key based on the provided options", examples, createKey)
cmd.Flags.String("os-arch", runtime.GOARCH,
"OS architecture to use in the cache key.\n"+
"Action may be called from different runners in a different OS. This ensures the right one is fetched")
@ -70,6 +65,6 @@ func GetCreateKeyCmd() *Cmd {
"--os-arch amd64 --cache-dir ~/cache_dir --version 1.0.0 --global-version 1",
"--os-arch x86_64 --cache-dir /tmp/cache_dir --version v2 --global-version 2",
}
cmd.ExamplePackages = ExamplePackages
cmd.ExamplePackages = cmdflags.ExamplePackages
return cmd
}

View file

@ -8,12 +8,13 @@ import (
"strings"
"time"
"awalsh128.com/cache-apt-pkgs-action/cmd/cache_apt_pkgs/cmdflags"
"awalsh128.com/cache-apt-pkgs-action/internal/cache"
"awalsh128.com/cache-apt-pkgs-action/internal/logging"
"awalsh128.com/cache-apt-pkgs-action/internal/pkgs"
)
func install(cmd *Cmd, pkgArgs pkgs.Packages) error {
func install(cmd *cmdflags.Cmd, pkgArgs pkgs.Packages) error {
apt, err := pkgs.NewApt()
if err != nil {
return fmt.Errorf("error initializing APT: %v", err)
@ -63,15 +64,15 @@ func install(cmd *Cmd, pkgArgs pkgs.Packages) error {
logging.Info("Wrote manifest to %s.", manifestPath)
// Set GitHub Actions outputs
SetPackageVersionList(pkgArgs)
SetAllPackageVersionList(installedPkgs)
cmd.GhioPrinter.SetOutput("package-version-list", pkgArgs)
cmd.GhioPrinter.SetOutput("all-package-version-list", installedPkgs)
logging.Info("Completed package installation.")
return nil
}
func GetInstallCmd() *Cmd {
cmd := &Cmd{
func GetInstallCmd() *cmdflags.Cmd {
cmd := &cmdflags.Cmd{
Name: "install",
Description: "Install packages and saves them to the cache",
Flags: flag.NewFlagSet("install", flag.ExitOnError),
@ -101,6 +102,6 @@ func GetInstallCmd() *Cmd {
"--cache-dir ~/cache_dir --version userver1 --global-version 20250812",
"--cache-dir /tmp/cache_dir --version what_ever --global-version whatever_too",
}
cmd.ExamplePackages = ExamplePackages
cmd.ExamplePackages = cmdflags.ExamplePackages
return cmd
}

View file

@ -1,22 +1,24 @@
package main
import (
"awalsh128.com/cache-apt-pkgs-action/cmd/cache_apt_pkgs/cmdflags"
"awalsh128.com/cache-apt-pkgs-action/internal/logging"
)
func main() {
logging.Init(true)
commands := CreateCmds(
commands := cmdflags.CreateCmds(
GetCreateKeyCmd(),
GetInstallCmd(),
GetRestoreCmd(),
GetSetupCmd(),
GetCleanupCmd(),
GetValidateCmd(),
)
cmd, pkgArgs := commands.Parse()
err := cmd.Run(cmd, pkgArgs)
cmd, pkgArgs, err := commands.Parse()
if err != nil {
logging.Fatalf("error: %v\n", err)
}
err = cmd.Run(cmd, pkgArgs)
if err != nil {
logging.Fatalf("error: %v\n", err)
}

View file

@ -5,9 +5,22 @@ import (
"os/exec"
"path/filepath"
"testing"
"awalsh128.com/cache-apt-pkgs-action/internal/logging"
atesting "awalsh128.com/cache-apt-pkgs-action/internal/testing"
)
// Integration test for the real commands used by main.
// SetupTest performs per-test initialization and registers cleanup hooks.
func SetupTest(t *testing.T) {
logging.Init(true)
t.Cleanup(func() {
logging.InitDefault()
})
}
// Integration test for main processing actual and non-existent commands.
//
// NOTE: No args are tested, just help as an example of a valid command.
func TestIntegration_MainCommands(t *testing.T) {
// Build the binary first
binaryPath := filepath.Join(t.TempDir(), "cache-apt-pkgs")
@ -152,3 +165,90 @@ func TestIntegration_CommandExecution(t *testing.T) {
})
}
}
type execRunResponse struct {
stdout string
stderr string
err error
ghVars map[string]string
}
func execBinaryAndReturnResponse(t *testing.T, binaryPath string, args []string) (response execRunResponse) {
t.Helper()
var err error
stdout, stderr := atesting.CaptureStd(func() {
cmd := exec.Command(binaryPath, args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err = cmd.Run()
})
return execRunResponse{
stdout: stdout,
stderr: stderr,
err: err,
}
}
// Simulate a pseudo GitHub Actions workflow using the commands
func TestIntegration_PseudoActionWorkflow(t *testing.T) {
const cacheDirName = "cache-apt-pkgs-action-cache"
const pkgs = "xdot rolldice"
// This test simulates a pseudo GitHub Actions workflow using the commands
tmpDir := t.TempDir()
cacheDir := filepath.Join(tmpDir, cacheDirName)
if err := os.MkdirAll(cacheDir, 0755); err != nil {
t.Fatalf("Failed to create cache dir: %v", err)
}
// Build the binary
binaryPath := filepath.Join(tmpDir, "cache-apt-pkgs")
cmd := exec.Command("go", "build", "-o", binaryPath, ".")
cmd.Dir = "."
if err := cmd.Run(); err != nil {
t.Fatalf("Failed to build binary: %v", err)
}
// Step 1: Validate packages
response := execBinaryAndReturnResponse(t, binaryPath, []string{"validate", pkgs})
if response.err != nil {
t.Fatalf("validate failed: %v", response.err)
}
// Step 2: Create cache key
response = execBinaryAndReturnResponse(t, binaryPath, []string{"createkey",
"--cache-dir", cacheDir,
"--version", "1.0",
"--global-version", "1.0",
"--ciphertext-path", filepath.Join(cacheDir, "cache_key.sha256"),
"--plaintext-path", filepath.Join(cacheDir, "cache_key.txt"),
pkgs})
if response.err != nil {
t.Fatalf("createkey command failed: %v", response.err)
}
// if response.ghVars["cache-hit"] == "true" {
// t.Log("Cache hit detected, executing restore.")
// // Step 4b: Restore packages
// response = execBinaryAndReturnResponse(t, binaryPath, []string{"restore",
// "--cache-dir", cacheDir,
// pkgs})
// if response.err != nil {
// t.Logf("restore failed: %v", response.err)
// }
// } else {
// t.Log("No cache hit, executing install.")
// // Step 4a: Install packages
// response = execBinaryAndReturnResponse(t, binaryPath, []string{"install",
// "--cache-dir", cacheDir,
// "--version", "1.0",
// "--global-version", "1.0",
// pkgs})
// if response.err != nil {
// t.Logf("install command failed: %v", response.err)
// }
// }
// t.Log("Pseudo GitHub Actions workflow simulation completed.")
}

View file

@ -2,16 +2,16 @@ package main
import (
"testing"
"awalsh128.com/cache-apt-pkgs-action/cmd/cache_apt_pkgs/cmdflags"
)
func TestMain_CommandStructure(t *testing.T) {
// Test that all commands are properly initialized
commands := CreateCmds(
commands := cmdflags.CreateCmds(
GetCreateKeyCmd(),
GetInstallCmd(),
GetRestoreCmd(),
GetSetupCmd(),
GetCleanupCmd(),
GetValidateCmd(),
)
@ -29,12 +29,10 @@ func TestMain_CommandStructure(t *testing.T) {
}
func TestMain_AllCommandsHaveRequiredFields(t *testing.T) {
commands := CreateCmds(
commands := cmdflags.CreateCmds(
GetCreateKeyCmd(),
GetInstallCmd(),
GetRestoreCmd(),
GetSetupCmd(),
GetCleanupCmd(),
GetValidateCmd(),
)

View file

@ -5,12 +5,13 @@ import (
"fmt"
"path/filepath"
"awalsh128.com/cache-apt-pkgs-action/cmd/cache_apt_pkgs/cmdflags"
"awalsh128.com/cache-apt-pkgs-action/internal/cache"
"awalsh128.com/cache-apt-pkgs-action/internal/logging"
"awalsh128.com/cache-apt-pkgs-action/internal/pkgs"
)
func restore(cmd *Cmd, pkgArgs pkgs.Packages) error {
func restore(cmd *cmdflags.Cmd, pkgArgs pkgs.Packages) error {
manifestPath := filepath.Join(cmd.StringFlag("cache-dir"), "manifest.json")
logging.Info("Reading manifest from %s.", manifestPath)
@ -27,15 +28,15 @@ func restore(cmd *Cmd, pkgArgs pkgs.Packages) error {
installedPkgs := pkgs.NewPackages(installedPkgList...)
// Set GitHub Actions outputs
SetPackageVersionList(pkgArgs)
SetAllPackageVersionList(installedPkgs)
cmd.GhioPrinter.SetOutput("package-version-list", pkgArgs)
cmd.GhioPrinter.SetOutput("all-package-version-list", installedPkgs)
logging.Info("Completed package restoration.")
return nil
}
func GetRestoreCmd() *Cmd {
cmd := &Cmd{
func GetRestoreCmd() *cmdflags.Cmd {
cmd := &cmdflags.Cmd{
Name: "restore",
Description: "Restore packages from the cache",
Flags: flag.NewFlagSet("restore", flag.ExitOnError),
@ -52,7 +53,7 @@ func GetRestoreCmd() *Cmd {
"--cache-dir ~/cache_dir --restore-root / --execute-scripts true",
"--cache-dir /tmp/cache_dir --restore-root /",
}
cmd.ExamplePackages = ExamplePackages
cmd.ExamplePackages = cmdflags.ExamplePackages
return cmd
}

View file

@ -5,11 +5,12 @@ import (
"fmt"
"strings"
"awalsh128.com/cache-apt-pkgs-action/cmd/cache_apt_pkgs/cmdflags"
"awalsh128.com/cache-apt-pkgs-action/internal/logging"
"awalsh128.com/cache-apt-pkgs-action/internal/pkgs"
)
func validate(cmd *Cmd, pkgArgs pkgs.Packages) error {
func validate(cmd *cmdflags.Cmd, pkgArgs pkgs.Packages) error {
apt, err := pkgs.NewApt()
if err != nil {
return fmt.Errorf("error initializing APT: %v", err)
@ -32,13 +33,13 @@ func validate(cmd *Cmd, pkgArgs pkgs.Packages) error {
return nil
}
func GetValidateCmd() *Cmd {
cmd := &Cmd{
func GetValidateCmd() *cmdflags.Cmd {
cmd := &cmdflags.Cmd{
Name: "validate",
Description: "Validate package arguments",
Flags: flag.NewFlagSet("validate", flag.ExitOnError),
Run: validate,
}
cmd.ExamplePackages = ExamplePackages
cmd.ExamplePackages = cmdflags.ExamplePackages
return cmd
}

View file

@ -1,3 +1,4 @@
// Package cio provides common I/O operations for the application,
// including JSON serialization, console stream capturing, and file handling.
// including JSON serialization, console stream capturing, file handling and Github Actions
// environment variable management.
package cio

View file

@ -1,12 +1,11 @@
package logging
import (
"encoding/json"
"io"
"log"
"os"
"sync"
"awalsh128.com/cache-apt-pkgs-action/internal/cio"
)
// loggerWrapper encapsulates a standard logger with additional functionality.
@ -42,6 +41,8 @@ func createDefault() loggerWrapper {
// This affects all subsequent log messages from this package.
// Thread-safe operation that can be called at any time.
func SetOutput(writer io.Writer) {
loggerMu.Lock()
defer loggerMu.Unlock()
logger.wrapped.SetOutput(writer)
}
@ -116,12 +117,12 @@ func Debug(format string, a ...any) {
func DumpVars(a ...any) {
if DebugEnabled {
for _, v := range a {
json, err := cio.ToJSON(v)
content, err := json.MarshalIndent(v, "", " ")
if err != nil {
Info("warning: unable to dump variable: %v", err)
continue
}
logger.wrapped.Println(json)
logger.wrapped.Println(content)
}
}
}

View file

@ -1,83 +0,0 @@
#!/bin/bash
#==============================================================================
# export_version.sh
#==============================================================================
#
# DESCRIPTION:
#   Script to export Go library version information for package development.
#   Extracts and exports version information from go.mod including Go version,
#   toolchain version, and syspkg version.
#
# USAGE:
#   export_version.sh [OPTIONS]
#
# OPTIONS:
#   -v, --verbose    Enable verbose output
#   -h, --help       Show this help message
#==============================================================================

# lib.sh provides PROJECT_ROOT, parse_common_args, and the log_* helpers.
source "$(git rev-parse --show-toplevel)/scripts/lib.sh"
parse_common_args "$@" >/dev/null # prevent return from echo'ng

# Function to extract Go version from go.mod ("go <version>" directive).
get_go_version() {
  local go_version
  go_version=$(grep "^go " "${PROJECT_ROOT}/go.mod" | awk '{print $2}')
  log_debug "Extracted Go version: ${go_version}"
  echo "${go_version}"
}

# Function to extract toolchain version from go.mod ("toolchain <version>").
get_toolchain_version() {
  local toolchain_version
  toolchain_version=$(grep "^toolchain " "${PROJECT_ROOT}/go.mod" | awk '{print $2}')
  log_debug "Extracted toolchain version: ${toolchain_version}"
  echo "${toolchain_version}"
}

# Function to extract syspkg version from go.mod.
# NOTE(review): this grep is unanchored; if go.mod mentions the module on more
# than one line (e.g. a replace directive) the output has multiple lines —
# confirm against the actual go.mod.
get_syspkg_version() {
  local syspkg_version
  syspkg_version=$(grep "github.com/awalsh128/syspkg" "${PROJECT_ROOT}/go.mod" | awk '{print $2}')
  log_debug "Extracted syspkg version: ${syspkg_version}"
  echo "${syspkg_version}"
}

# Export versions as environment variables
log_info "Exporting version information..."

GO_VERSION=$(get_go_version)
export GO_VERSION

TOOLCHAIN_VERSION=$(get_toolchain_version)
export TOOLCHAIN_VERSION

SYSPKG_VERSION=$(get_syspkg_version)
export SYSPKG_VERSION

# Create a version info file (shell-sourceable key=value format).
VERSION_FILE="${PROJECT_ROOT}/.version-info"
log_debug "Creating version file: ${VERSION_FILE}"

cat >"${VERSION_FILE}" <<EOF
# Version information for cache-apt-pkgs-action
GO_VERSION=${GO_VERSION}
TOOLCHAIN_VERSION=${TOOLCHAIN_VERSION}
SYSPKG_VERSION=${SYSPKG_VERSION}
EXPORT_DATE=$(date '+%Y-%m-%d %H:%M:%S')
EOF

echo "Version information has been exported to ${VERSION_FILE}"
echo "Go Version: ${GO_VERSION}"
echo "Toolchain Version: ${TOOLCHAIN_VERSION}"
echo "Syspkg Version: ${SYSPKG_VERSION}"

# Also create a JSON format for tools that prefer it.
# NOTE(review): date is evaluated a second time here, so the JSON exportDate
# can differ by a second from EXPORT_DATE in the .version-info file.
VERSION_JSON="${PROJECT_ROOT}/.version-info.json"
cat >"${VERSION_JSON}" <<EOF
{
  "goVersion": "${GO_VERSION}",
  "toolchainVersion": "${TOOLCHAIN_VERSION}",
  "syspkgVersion": "${SYSPKG_VERSION}",
  "exportDate": "$(date '+%Y-%m-%d %H:%M:%S')"
}
EOF

echo "Version information also exported in JSON format to ${VERSION_JSON}"

View file

@ -1,27 +0,0 @@
#!/bin/bash
#==============================================================================
# fix_and_update.sh
#==============================================================================
#
# DESCRIPTION:
#   Runs lint fixes and checks for UTF-8 formatting issues in the project.
#   Intended to help maintain code quality and formatting consistency.
#
# USAGE:
#   fix_and_update.sh
#
# OPTIONS:
#   -v, --verbose    Enable verbose output
#   -h, --help       Show this help message
#==============================================================================

# lib.sh provides parse_common_args, print_status, require_command, log_success.
source "$(git rev-parse --show-toplevel)/scripts/lib.sh"
parse_common_args "$@" >/dev/null # prevent return from echo'ng

print_status "Running trunk format and code check..."
require_command trunk "Install trunk to run lint fixes via curl https://get.trunk.io -fsSL | bash."
# NOTE(review): --ci puts trunk into check-only mode, so despite this script's
# name and the success message below, these commands verify rather than apply
# fixes — confirm whether `trunk check --fix` / `trunk fmt` without --ci was
# intended.
trunk check --all --ci
trunk fmt --all --ci
log_success "All lint fixes applied and checks complete."

513
scripts/lib.sh Executable file → Normal file
View file

@ -1,322 +1,226 @@
#!/bin/bash
#==============================================================================
# lib.sh
# lib.sh - Core Shell Script Library
#==============================================================================
#
# DESCRIPTION:
# Enhanced common shell script library for project utilities and helpers.
# Provides functions for logging, error handling, argument parsing, file operations,
# command validation, and development workflow tasks.
# Enhanced common shell script library providing core functionality for all
# project scripts. Includes logging, error handling, argument parsing, file
# operations, command validation, and workflow utilities.
#
# USAGE:
# source "$(cd "$(dirname "$0")" && pwd)/lib.sh"
#
# FEATURES:
# - Consistent logging and output formatting
# - Command existence and dependency checking
# - File and directory operations
# - Project structure helpers
# - Development tool installation helpers
# - Error handling and validation
# FUNCTIONS:
# Logging:
# log_info <msg> Log informational message to stdout
# log_warn <msg> Log warning message to stderr
# log_error <msg> Log error message to stderr
# log_success <msg> Log success message to stdout
# log_debug <msg> Log debug message to stderr (if VERBOSE=true)
# print_header <text> Print formatted section header
#
# Arguments:
# parse_common_args Process standard script arguments (-h, -v, -q)
# show_help Display script-specific help from header comments
#
# UI Utilities:
# echo_color <color> <msg> Print message in specified color
# confirm <prompt> Prompt for yes/no confirmation
# pause Wait for user input to continue
#
# DEPENDENCIES:
# - bash 4.0+ (for associative arrays and other features)
# - coreutils (tput, basename, readlink)
#
# ENVIRONMENT:
# VERBOSE Enable verbose debug logging (default: false)
# QUIET Suppress non-error output (default: false)
# PS4 Debug prefix format when running with bash -x
#
# EXAMPLES:
# source ./lib.sh
# log_info "Starting process..."
# parse_common_args "$@"
# confirm "Continue?" || exit 1
#==============================================================================
# Exit on error by default for sourced scripts
set -eE -o functrace
# Enable strict error handling
set -eEuo pipefail
# Detect debugging flag (bash -x) and also print line numbers
[[ $- == *"x"* ]] && PS4='+$(basename ${BASH_SOURCE[0]}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
# Global variables
export VERBOSE=${VERBOSE:-false}
export QUIET=${QUIET:-false}
export SCRIPT_DIRNAME="scripts"
# Script configuration and runtime control
export VERBOSE=${VERBOSE:-false} # Enable verbose debug logging
export QUIET=${QUIET:-false} # Suppress non-error output
export SCRIPT_DIRNAME="scripts" # Standard scripts directory name
#==============================================================================
# Logging Functions
# ANSI Color and Text Formatting Codes
#==============================================================================
# These color codes are used by the echo_color and logging functions to provide
# consistent terminal output formatting across all scripts.
export GREEN='\033[0;32m'
export RED='\033[0;31m'
export YELLOW='\033[0;33m'
export BLUE='\033[0;34m'
export CYAN='\033[0;36m'
export MAGENTA='\033[0;35m'
export NC='\033[0m' # No Color
export BOLD='\033[1m'
export DIM='\033[2m'
export BLINK='\033[5m'
# Basic colors
export GREEN='\033[0;32m' # Success, completion
export RED='\033[0;31m' # Errors, failures
export YELLOW='\033[0;33m' # Warnings, cautions
export BLUE='\033[0;34m' # Information, headers
export CYAN='\033[0;36m' # Processing status
export MAGENTA='\033[0;35m' # Special notifications
echo_color() {
# Text formatting
export NC='\033[0m' # Reset all formatting
export BOLD='\033[1m' # Bold/bright text
export DIM='\033[2m' # Dim/muted text
export BLINK='\033[5m' # Blinking text
# Print text in the specified color with proper formatting
# Arguments:
# [-e|-n] Optional echo flags (-e enable escapes, -n no newline)
# color Color name (green|red|yellow|blue|cyan|magenta)
# message Text to print in the specified color
# Example:
# echo_color green "Operation successful!"
# echo_color -n blue "Processing..."
function echo_color() {
local echo_flags=()
# Collect echo flags (start with -)
# Collect valid echo flags that start with dash
while [[ $1 == -* ]]; do
if [[ $1 == "-e" || $1 == "-n" ]]; then
echo_flags+=("$1")
fi
shift
done
# Convert color name to uppercase variable name
local color="$1"
local color_var
color_var=$(echo "${color}" | tr '[:lower:]' '[:upper:]')
shift
# Print message with color codes and any specified flags
echo -e "${echo_flags[@]}" "${!color_var}$*${NC}"
}
#==============================================================================
# Logging Functions
# Logging and Output Functions
#==============================================================================
# A comprehensive set of logging functions that provide consistent formatting and
# behavior for different types of messages. All functions respect the QUIET and
# VERBOSE environment variables for controlled output.
log_info() {
# Log an informational message to stdout
# Arguments:
# message The information to log
# Respects: QUIET=true will suppress output
function log_info() {
if ! ${QUIET}; then
echo -e "${BLUE}[INFO]${NC} $1"
fi
}
log_warn() {
# Log a warning message to stderr
# Arguments:
# message The warning to log
# Notes: Not affected by QUIET mode
function log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1" >&2
}
log_error() {
# Log an error message to stderr
# Arguments:
# message The error message to log
# Notes: Not affected by QUIET mode
function log_error() {
echo -e "${RED}[ERROR]${NC} $1" >&2
}
log_success() {
# Log a success message to stdout
# Arguments:
# message The success message to log
# Respects: QUIET=true will suppress output
function log_success() {
if ! ${QUIET}; then
echo -e "${GREEN}[SUCCESS]${NC} $1"
fi
}
log_debug() {
# Log a debug message to stderr if verbose mode is enabled
# Arguments:
# message The debug information to log
# Requires: VERBOSE=true to display output
function log_debug() {
if ${VERBOSE}; then
echo -e "${DIM}[DEBUG]${NC} $1" >&2
fi
}
# Print formatted headers
print_header() {
# Print a formatted section header with proper spacing
# Arguments:
# text The header text to display
# Respects: QUIET=true will suppress output
function print_header() {
if ! ${QUIET}; then
echo -en "\n${BOLD}${BLUE}$1${NC}\n"
fi
}
print_section() {
if ! ${QUIET}; then
echo -en "\n${CYAN}${BOLD}$1${NC}\n\n"
fi
}
print_option() {
if ! ${QUIET}; then
echo -en "${YELLOW}$1)${CYAN} $2${NC}\n"
fi
}
print_status() {
if ! ${QUIET}; then
echo -en "${GREEN}==>${NC} $1\n"
fi
}
print_success() {
if ! ${QUIET}; then
echo -en "${GREEN}${BOLD}$1${NC}\n"
fi
}
#==============================================================================
# Error Handling
# Command-line Argument Processing Functions
#==============================================================================
# These functions provide standardized argument parsing and help display across
# all scripts. They handle common flags (-h, -v, -q) and extract help text
# from script headers.
fail() {
# Usage: fail [message] [exit_code]
local msg="${1-}"
local exit_code="${2:-1}"
if [[ -n ${msg} ]]; then
log_error "${msg}"
fi
exit "${exit_code}"
}
# Display formatted help text extracted from the calling script's header comments
# Usage:
# show_help # Called automatically by parse_common_args for -h flag
# Returns:
# Prints formatted help text to stdout
# Notes:
# - Extracts text between the first #=== block in the script header
# - Removes comment markers (#) and formats for clean display
# - Returns early with message if script file cannot be found
function show_help() {
# Extract header comment block from calling script
local script_file="${BASH_SOURCE[1]}"
# Trap handler for cleanup
cleanup_on_exit() {
local exit_code=$?
[[ -n ${TEMP_DIR} && -d ${TEMP_DIR} ]] && rm -rf "${TEMP_DIR}"
[[ ${exit_code} -eq 0 ]] && exit 0
local i
for ((i = ${#FUNCNAME[@]} - 1; i; i--)); do
echo "${BASH_SOURCE[i]}:${BASH_LINENO[i]}: ${FUNCNAME[i]}"
done
exit "${exit_code}"
}
setup_cleanup() {
trap 'cleanup_on_exit' EXIT
}
#==============================================================================
# Command and Dependency Checking
#==============================================================================
command_exists() {
command -v "$1" >/dev/null 2>&1
}
require_command() {
local cmd="$1"
local install_msg="${2:-Please install ${cmd}}"
if ! command_exists "${cmd}"; then
fail "${cmd} is required. ${install_msg}"
fi
log_debug "Found required command: ${cmd}"
}
require_script() {
local script="$1"
if [[ ! -x ${script} ]]; then
fail "${script} is required and must be executable. This script has a bug."
fi
log_debug "Found required script: ${script}"
}
npm_package_installed() {
npm list -g "$1" >/dev/null 2>&1
}
go_tool_installed() {
go list -m "$1" >/dev/null 2>&1 || command_exists "$(basename "$1")"
}
#==============================================================================
# File and Directory Operations
#==============================================================================
file_exists() {
[[ -f $1 ]]
}
dir_exists() {
[[ -d $1 ]]
}
ensure_dir() {
[[ ! -d $1 ]] && mkdir -p "$1"
log_debug "Ensured directory exists: $1"
}
create_temp_dir() {
TEMP_DIR=$(mktemp -d)
log_debug "Created temporary directory: ${TEMP_DIR}"
echo "${TEMP_DIR}"
}
safe_remove() {
local path="$1"
if [[ -e ${path} ]]; then
rm -rf "${path}"
log_debug "Removed: ${path}"
fi
}
#==============================================================================
# Project Structure Helpers
#==============================================================================
get_project_root() {
local root
if command_exists git; then
root=$(git rev-parse --show-toplevel 2>/dev/null || true)
fi
if [[ -n ${root} ]]; then
echo "${root}"
else
# Fallback to current working directory
pwd
fi
}
# Resolve the project root once at source time and export it so child
# scripts and subshells inherit the same value.
PROJECT_ROOT="$(get_project_root)"
export PROJECT_ROOT
#==============================================================================
# Development Tool Helpers
#==============================================================================
install_trunk() {
  # Install the trunk meta-linter via its official bootstrap script;
  # no-op when trunk is already on PATH.
  # Fix: a bad merge had spliced show_help's header-parsing loop into this
  # body and dropped the closing `fi` of the first conditional, leaving the
  # function syntactically broken and referencing an undefined script_file.
  if command_exists trunk; then
    log_debug "trunk already installed"
    return 0
  fi
  log_info "Installing trunk..."
  # Network side effect: downloads and runs the upstream installer.
  curl -fsSL https://get.trunk.io | bash
  log_success "trunk installed successfully"
}
install_doctoc() {
  # Install the doctoc TOC generator globally via npm, skipping the install
  # when it is already present.
  require_command npm "Please install Node.js and npm first"
  if npm_package_installed doctoc; then
    log_debug "doctoc already installed"
    return 0
  fi
  log_info "Installing doctoc..."
  npm install -g doctoc
  log_success "doctoc installed successfully"
}
install_go_tools() {
  # Install the pinned set of Go developer tools with `go install`.
  # NOTE(review): golangci-lint upstream discourages installing via
  # `go install` of the cmd path — confirm this is acceptable here.
  local tools=(
    "golang.org/x/tools/cmd/goimports@latest"
    "github.com/segmentio/golines@latest"
    "github.com/golangci/golangci-lint/cmd/golangci-lint@latest"
  )
  log_info "Installing Go development tools..."
  for tool in "${tools[@]}"; do
    log_info "Installing $(basename "${tool}")..."
    go install "${tool}"
  done
  log_success "Go tools installed successfully"
}
#==============================================================================
# Validation Helpers
#==============================================================================
validate_go_project() {
  # Abort unless Go is installed and the project root contains a go.mod.
  require_command go "Please install Go first"
  local project_root
  project_root=$(get_project_root)
  if [[ ! -f "${project_root}/go.mod" ]]; then
    fail "Not a Go project (no go.mod found)"
  fi
  log_debug "Validated Go project structure"
}
validate_git_repo() {
  # Abort unless the project root is inside a git work tree.
  require_command git "Please install git first"
  local project_root
  project_root=$(get_project_root)
  # Fix: ask git itself instead of testing for a .git directory — in
  # worktrees and submodules .git is a file, which the old `-d` check
  # incorrectly rejected.
  if ! git -C "${project_root}" rev-parse --is-inside-work-tree >/dev/null 2>&1; then
    fail "Not a git repository"
  fi
  log_debug "Validated git repository"
}
#==============================================================================
# Common Argument Parsing
#==============================================================================
parse_common_args() {
  # Process common command-line arguments used across all scripts.
  # Arguments:
  #   $@  All script arguments to process
  # Options:
  #   -h, --help     Show help text and exit
  #   -v, --verbose  Enable verbose debug output
  #   -q, --quiet    Suppress non-error output
  # Returns:
  #   Prints any unhandled arguments to stdout for capture by caller.
  # Fix: a bad merge left a nested duplicate `function parse_common_args()`
  # definition and diff-hunk residue inside the body; reconstructed from
  # the documented option contract above.
  while [[ $# -gt 0 ]]; do
    case $1 in
      -h | --help)
        # Delegate to the sourcing script's help if it defines one.
        [[ $(type -t show_help) == function ]] && show_help
        exit 0
        ;;
      -v | --verbose)
        export VERBOSE=true
        shift
        ;;
      -q | --quiet)
        export QUIET=true
        shift
        ;;
      *)
        # Stop at first non-flag argument
        break
        ;;
    esac
  done
  # Echo any remaining unhandled arguments for callers to capture
  if [[ $# -gt 0 ]]; then
    echo "$@"
  fi
}
#==============================================================================
# Common Operations
# Interactive User Interface Functions
#==============================================================================
# These functions provide consistent user interaction patterns across scripts,
# including confirmation prompts and execution pauses. All respect the QUIET
# environment variable for automated execution.
run_with_status() {
  # Announce a task, eval the command string, report success or failure,
  # and propagate the command's exit status to the caller.
  local description="$1"
  shift
  local cmd="$*"
  print_status "${description}"
  log_debug "Running: ${cmd}"
  local rc=0
  eval "${cmd}" || rc=$?
  if ((rc == 0)); then
    log_success "${description} completed"
  else
    log_error "${description} failed (exit code: ${rc})"
  fi
  return "${rc}"
}
update_go_modules() {
  # Tidy and verify go.mod/go.sum with status reporting.
  run_with_status "Updating Go modules" "go mod tidy && go mod verify"
}
run_tests() {
  # Run the full Go test suite verbosely.
  # NOTE(review): the shell test scripts also define a run_tests function;
  # whichever is defined last wins — confirm the collision is intended.
  run_with_status "Running tests" "go test -v ./..."
}
run_build() {
  # Build every package in the module with verbose output.
  run_with_status "Building project" "go build -v ./..."
}
run_lint() {
  # Run trunk linting across the repository; requires trunk on PATH.
  require_command trunk "Please install trunk first"
  run_with_status "Running linting" "trunk check"
}
#==============================================================================
# Default Help Function
#==============================================================================
show_help() {
  # Print the sourcing script's header comment block as its help text.
  # Reads BASH_SOURCE[1] (the caller's file) and collects the `#` comment
  # lines between the first `#====` divider and the first non-comment line,
  # stripping the leading hash from each.
  # Extract header comment block and format it
  local script_file="${BASH_SOURCE[1]}"
  if [[ ! -f ${script_file} ]]; then
    echo "Help information not available"
    return
  fi
  local lines=$'\n'
  local inside_header=false
  while IFS= read -r line; do
    if [[ ${inside_header} == true ]]; then
      # Skip divider lines themselves.
      [[ ${line} =~ ^#\=+ ]] && continue
      if [[ ${line} =~ ^# ]]; then
        # Keep the comment text, minus its leading hash.
        lines+="${line#\#}"$'\n'
      else
        # First non-comment line ends the header block.
        break
      fi
    fi
    # Arm collection once the opening `#====` divider is seen.
    [[ ${line} =~ ^#\=+ ]] && inside_header=true
  done <"${script_file}"
  printf "%s" "${lines}"
}
#==============================================================================
# Utility Functions
#==============================================================================
pause() {
  # Pause script execution until the user presses any key.
  # Automatically skipped when QUIET=true; adds newlines around the prompt.
  # Fix: a bad merge left a nested duplicate `function pause()` definition
  # inside the body; also default QUIET so `set -u` callers don't error
  # when it was never exported.
  [[ ${QUIET:-false} == true ]] && return
  echo
  read -n 1 -s -r -p "Press any key to continue..."
  echo
}
confirm() {
  # Prompt the user for yes/no confirmation.
  # Arguments:
  #   $1  Optional custom prompt (default: "Are you sure?")
  # Returns:
  #   0 if the user confirms (y/yes), 1 if they decline (n/no).
  # Accepts y, yes, n, no case-insensitively and re-prompts on anything else.
  # Fix: a bad merge left a nested duplicate `function confirm()` definition
  # and truncated the read/case loop; reconstructed from the documented
  # contract above.
  local prompt="${1:-Are you sure?}"
  local response
  while true; do
    read -r -p "${prompt} [y/n]: " response
    case ${response,,} in
      y | yes) return 0 ;;
      n | no) return 1 ;;
      *) echo "Please answer yes or no." ;;
    esac
  done
}
#==============================================================================
# Initialization
#==============================================================================
# Set up cleanup trap when library is sourced
setup_cleanup
init() {
  # Optional entry point for sourcing scripts: parse the shared flags, then
  # refuse to continue if this library was executed rather than sourced.
  # NOTE(review): BASH_SOURCE[0] here refers to lib.sh itself, so the guard
  # only fires on direct execution of lib.sh — confirm that is the intent.
  parse_common_args "$@"
  if [[ ${BASH_SOURCE[0]} == "${0}" ]]; then
    echo "This script should be sourced, not executed directly."
    # shellcheck disable=SC2016
    echo 'Usage: source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"/lib.sh'
    exit 1
  fi
}
# Do not auto-run init when this file is sourced; allow callers to invoke init() explicitly if needed.

View file

@ -1,161 +0,0 @@
#!/bin/bash
#==============================================================================
# menu.sh
#==============================================================================
#
# DESCRIPTION:
# Streamlined interactive menu for essential development tasks.
# Provides quick access to the most commonly used development operations.
#
# USAGE:
# menu.sh
#
# OPTIONS:
# -v, --verbose Enable verbose output
# -h, --help Show this help message
#==============================================================================
# Load the shared helpers (logging, PROJECT_ROOT, parse_common_args).
source "$(git rev-parse --show-toplevel)/scripts/lib.sh"
# Paths used by the menu tasks below.
SCRIPT_DIR="${PROJECT_ROOT}/scripts"
CAP_CMD_DIR="${PROJECT_ROOT}/cmd/cache_apt_pkgs"
parse_common_args "$@" >/dev/null # prevent return from echo'ng
#==============================================================================
# Menu Operations
#==============================================================================
run_task() {
  # Run a menu task with a banner, success/failure report, and a keypress
  # pause so the user can read the output before the menu redraws.
  local description="$1"
  shift
  local cmd="$*"
  print_status "Running: ${description}"
  [[ ${VERBOSE} == true ]] && log_debug "Command: ${cmd}"
  echo
  if eval "${cmd}"; then
    log_success "${description} completed successfully"
  else
    # $? still holds the eval'd command's status at this point.
    local exit_code=$?
    log_error "${description} failed (exit code: ${exit_code})"
  fi
  pause
}
show_project_status() {
  # Display git, Go module, and (if trunk is installed) lint status,
  # then wait for a keypress.
  print_header "Project Status"
  echo "Git Status:"
  git status --short --branch
  echo
  echo "Go Module Status:"
  go mod verify && log_success "Go modules are valid"
  echo
  if command_exists trunk; then
    echo "Linting Status:"
    trunk check --no-fix --quiet || log_warn "Linting issues detected"
    echo
  fi
  pause
}
#==============================================================================
# Main Menu Loop
#==============================================================================
main_menu() {
  # Interactive loop: redraw the menu, read one keypress, dispatch the task.
  while true; do
    clear
    print_header "Cache Apt Packages - Development Menu"
    print_section "Essential Tasks:"
    print_option 1 "Setup Development Environment"
    print_option 2 "Run All Checks (test + lint + build)"
    print_option 3 "Test Only"
    print_option 4 "Lint & Fix"
    print_option 5 "Build Project"
    print_section "Maintenance:"
    print_option 6 "Update Documentation (TOCs)"
    print_option 7 "Export Version Info"
    print_section "Information:"
    print_option 8 "Project Status"
    print_option 9 "Recent Changes"
    echo
    print_option q "Quit"
    echo
    echo_color -n green "choice > "
    # Single-keypress input; a bare Enter matches the quit branch below.
    read -n 1 -rp "" choice
    printf "\n\n"
    case ${choice} in
      1)
        run_task "Setting up development environment" \
          "${SCRIPT_DIR}/setup_dev.sh"
        ;;
      2)
        print_header "Running All Checks"
        echo ""
        run_task "Running linting" "trunk check --fix"
        run_task "Building project" "go build -v ${CAP_CMD_DIR}"
        run_task "Running tests" "go test -v ${CAP_CMD_DIR}"
        ;;
      3)
        run_task "Running tests" "go test -v ${CAP_CMD_DIR}"
        ;;
      4)
        run_task "Running lint with fixes" "trunk check --fix"
        ;;
      5)
        run_task "Building project" "go build -v ${CAP_CMD_DIR}"
        ;;
      6)
        run_task "Updating documentation TOCs" \
          "${SCRIPT_DIR}/update_md_tocs.sh"
        ;;
      7)
        run_task "Exporting version information" \
          "${SCRIPT_DIR}/export_version.sh"
        ;;
      8)
        show_project_status
        ;;
      9)
        print_header "Recent Changes"
        git log --oneline --graph --decorate -n 10
        pause
        ;;
      q | Q | "")
        echo -e "${GREEN}Goodbye!${NC}"
        exit 0
        ;;
      *)
        echo ""
        log_error "Invalid option: ${choice}"
        pause
        ;;
    esac
  done
}
#==============================================================================
# Entry Point
#==============================================================================
# Validate project structure
# validate_go_project
# validate_git_repo
# Parse any command line arguments
# NOTE(review): arguments were already parsed at the top of this file; this
# second call re-echoes any unhandled args to stdout — confirm it is needed.
parse_common_args "$@"
# Run main menu
main_menu

View file

@ -1 +0,0 @@
#!/bin/bash

View file

@ -1,164 +0,0 @@
#!/bin/bash
#==============================================================================
# setup_dev.sh
#==============================================================================
#
# DESCRIPTION:
# Sets up the development environment for the cache-apt-pkgs-action project.
# Installs all necessary tools, configures Go environment, and sets up
# pre-commit hooks.
#
# USAGE:
# setup_dev.sh [options]
#
# OPTIONS:
# -v, --verbose Enable verbose output
# -h, --help Show this help message
#==============================================================================
# Load the shared helper library from the repository root.
source "$(git rev-parse --show-toplevel)/scripts/lib.sh"
# NOTE(review): main() re-parses arguments below; this early parse may echo
# unhandled args to stdout — confirm the duplication is intended.
parse_common_args "$@"
#==============================================================================
# Setup Functions
#==============================================================================
check_prerequisites() {
  # Fail fast unless every external tool needed by setup is on PATH,
  # giving a tool-specific install hint when one is missing.
  print_status "Checking prerequisites"
  local spec
  for spec in \
    "go|Please install Go first (https://golang.org/dl/)" \
    "npm|Please install Node.js and npm first (https://nodejs.org/)" \
    "git|Please install git first" \
    "curl|Please install curl first"; do
    require_command "${spec%%|*}" "${spec#*|}"
  done
  log_success "All prerequisites are available"
}
setup_go_environment() {
  # Validate the Go project layout, then configure module mode and tidy deps.
  validate_go_project
  print_status "Configuring Go environment"
  # NOTE(review): GO111MODULE=auto is a legacy setting; modern Go defaults
  # to module mode — confirm this override is still wanted.
  go env -w GO111MODULE=auto
  update_go_modules
}
install_development_tools() {
  # Install the linter, doc tooling, and Go helper binaries via lib.sh.
  print_status "Installing development tools"
  install_trunk
  install_doctoc
  install_go_tools
  log_success "All development tools installed"
}
setup_git_hooks() {
  # Initialize trunk configuration if missing and point git at the hooks dir.
  validate_git_repo
  print_status "Setting up Git hooks"
  # Initialize trunk if not already done
  if [[ ! -f .trunk/trunk.yaml ]]; then
    log_info "Initializing trunk configuration"
    trunk init
  fi
  # Configure git hooks
  # NOTE(review): .git/hooks/ is already git's default hooksPath — confirm
  # this explicit setting is intentional (e.g. to undo a global override).
  git config core.hooksPath .git/hooks/
  log_success "Git hooks configured"
}
update_project_documentation() {
  # Run the markdown TOC updater if it is present and executable.
  print_status "Updating project documentation"
  # Fix: SCRIPT_DIR is never defined in this script (it belongs to menu.sh),
  # so the old "${SCRIPT_DIR}/update_md_tocs.sh" resolved to
  # "/update_md_tocs.sh". Resolve via PROJECT_ROOT exported by lib.sh.
  local update_script="${PROJECT_ROOT}/scripts/update_md_tocs.sh"
  if [[ -x ${update_script} ]]; then
    "${update_script}"
  else
    log_warn "Markdown TOC update script not found or not executable"
  fi
}
run_initial_checks() {
  # Smoke-test the fresh environment: lint (when trunk exists) and run tests.
  print_status "Running initial project validation"
  # Run trunk check
  if command_exists trunk; then
    run_with_status "Running initial linting" "trunk check --no-fix"
  fi
  # Run tests
  run_tests
  log_success "Initial validation completed"
}
display_completion_message() {
  # Print a closing banner and the most common follow-up commands.
  print_header "Development Environment Setup Complete!"
  echo "Available commands:"
  echo " • Run tests: go test ./..."
  echo " • Run linting: trunk check"
  echo " • Update documentation: ./scripts/update_md_tocs.sh"
  echo " • Interactive menu: ./scripts/menu.sh"
  echo
  log_success "Ready for development!"
}
#==============================================================================
# Main Setup Process
#==============================================================================
main() {
  # Entry point: parse flags, then run the setup steps in order.
  # NOTE(review): parse_common_args already ran at file scope; this loop
  # re-parses — confirm the duplication is intended.
  # Parse command line arguments first
  while [[ $# -gt 0 ]]; do
    case $1 in
      -v | --verbose)
        export VERBOSE=true
        ;;
      -h | --help)
        cat <<'EOF'
USAGE:
setup_dev.sh [OPTIONS]
DESCRIPTION:
Sets up the development environment for the cache-apt-pkgs-action project.
Installs all necessary tools, configures Go environment, and sets up
pre-commit hooks.
OPTIONS:
-v, --verbose Enable verbose output
-h, --help Show this help message
EOF
        exit 0
        ;;
      *)
        echo "Unknown option: $1" >&2
        echo "Use --help for usage information." >&2
        exit 1
        ;;
    esac
    shift
  done
  print_header "Setting up Development Environment"
  # Run setup steps
  check_prerequisites
  setup_go_environment
  install_development_tools
  setup_git_hooks
  update_project_documentation
  run_initial_checks
  display_completion_message
}
#==============================================================================
# Entry Point
#==============================================================================
main "$@"

View file

@ -1,64 +0,0 @@
#!/bin/bash
#==============================================================================
# export_version_test.sh
#==============================================================================
#
# DESCRIPTION:
# Test suite for export_version.sh script.
# Validates version extraction, file generation, and error handling.
#
# USAGE:
# export_version_test.sh [OPTIONS]
#
# OPTIONS:
# -v, --verbose Enable verbose test output
# --stop-on-failure Stop on first test failure
# -h, --help Show this help message
#
#==============================================================================
# Source the test framework, exports SCRIPT_PATH
source "$(git rev-parse --show-toplevel)/scripts/tests/test_lib.sh"
# Define test functions
run_tests() {
  # Exercise export_version.sh: CLI output, generated files, and contents.
  test_section "Command Line Interface"
  test_case "basic execution" \
    "" \
    "Exporting version information" \
    true
  test_section "File Generation"
  test_case "version info file creation" \
    "" \
    "Version information has been exported" \
    true
  test_case "JSON file creation" \
    "" \
    "exported in JSON format" \
    true
  test_section "File Contents Validation"
  local project_root
  project_root=$(get_project_root)
  # Test that files exist and contain expected content
  test_file_exists "version info file exists" "${project_root}/.version-info"
  test_file_exists "JSON version file exists" "${project_root}/.version-info.json"
  test_file_contains "version file contains Go version" \
    "${project_root}/.version-info" \
    "GO_VERSION="
  test_file_contains "JSON file contains Go version" \
    "${project_root}/.version-info.json" \
    '"goVersion":'
}
# Start the test framework and run tests
start_tests "$@"
run_tests

View file

@ -1,48 +0,0 @@
#!/bin/bash
#==============================================================================
# setup_dev_test.sh
#==============================================================================
#
# DESCRIPTION:
# Test script for setup_dev.sh functionality.
# Validates development environment setup without modifying the actual system.
#
# USAGE:
# setup_dev_test.sh [OPTIONS]
#
# OPTIONS:
# -v, --verbose Enable verbose test output
# --stop-on-failure Stop on first test failure
# -h, --help Show this help message
#
#==============================================================================
# Source the test framework, exports SCRIPT_PATH
source "$(git rev-parse --show-toplevel)/scripts/tests/test_lib.sh"
# Define test functions
run_tests() {
  # Validate setup_dev.sh's CLI surface without running the real setup.
  test_section "Help and Usage"
  test_case "shows help message" \
    "--help" \
    "USAGE:" \
    true
  test_case "shows error for invalid option" \
    "--invalid-option" \
    "Unknown option" \
    false
  test_section "Argument Processing"
  test_case "accepts verbose flag" \
    "--verbose --help" \
    "USAGE:" \
    true
}
# Start the test framework and run tests
start_tests "$@"
run_tests

View file

@ -1,49 +0,0 @@
#!/bin/bash
#==============================================================================
# <script name>.sh
#==============================================================================
#
# DESCRIPTION:
# Test suite for <script name>.sh. Validates <brief description of what is
# being tested>.
#
# USAGE:
# <script name>.sh [OPTIONS]
#
# OPTIONS:
# -v, --verbose Enable verbose test output
# --stop-on-failure Stop on first test failure
# -h, --help Show this help message
#
#==============================================================================
# Source the test framework, exports SCRIPT_PATH
source "$(git rev-parse --show-toplevel)/scripts/tests/test_lib.sh"
# Define test functions
run_tests() {
  # Template body: replace the <...> placeholders when copying this file
  # for a new test script.
  # Prints "Testing group <test group one name>"
  print_group "<test group one name>"
  # Prints "Testing section <test section 1 name>"
  print_section "<test section 1 name>"
  test_case "<test name 1>" \
    "" \
    "No changes made" \
    false
  test_case "<test name 2>" \
    "with_changes" \
    "" \
    false
  # Prints "Testing section <test section 2 name>"
  print_section "<test section 2 name>"
  test_case "<test name 3>" \
    "" \
    "No changes made" \
    false
}

View file

@ -1,465 +0,0 @@
#!/bin/bash
#==============================================================================
# test_lib.sh
#==============================================================================
#
# DESCRIPTION:
# Common test library providing standardized test framework for bash scripts.
# Provides test execution, assertions, test environment setup, and reporting.
# Implements improved architecture patterns for reliable test execution.
#
# USAGE:
# # Set up the script path we want to test BEFORE sourcing
# SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
# export SCRIPT_PATH="$SCRIPT_DIR/../script_name.sh"
#
# # Source the test framework
# source "$SCRIPT_DIR/test_lib.sh"
#
# # Define test functions
# run_tests() {
# test_section "Section Name"
# test_case "test name" "args" "expected_output" "should_succeed"
# }
#
# # Start the test framework and run tests
# start_tests "$@"
# run_tests
#
# OPTIONS (inherited from command line):
# -v, --verbose Enable verbose test output
# --stop-on-failure Stop on first test failure
# -h, --help Show this help message
#
# EXPORTS: For use in test scripts.
# - SCRIPT_PATH Path of the script the test is running against
# - TEMP_TEST_DIR Path to the temporary test directory
# - test_case Function to define a test case
# - test_section Function to define test sections
# - test_file_exists Function to test file existence
# - test_file_contains Function to test file contents
#
# FEATURES:
# - Improved library loading with fallback paths
# - Safe SCRIPT_PATH handling without overriding test settings
# - Arithmetic operations compatible with set -e
# - Proper script name detection for test headers
# - Lazy temporary directory initialization
# - Standardized test case execution and reporting
# - Test environment management with automatic cleanup
# - Comprehensive assertion functions
# - Test statistics and result reporting
#
# ARCHITECTURE IMPROVEMENTS:
# - Library loading uses multiple fallback paths for reliability
# - SCRIPT_PATH variable is preserved from test script initialization
# - Arithmetic increment operations use "|| true" pattern for set -e compatibility
# - Test framework initialization is separated from test execution
# - Temporary directory creation is deferred until actually needed
# - Script name detection iterates through BASH_SOURCE to find actual test script
#
#==============================================================================
# Source the shared library - get the correct path
# shellcheck source="../lib.sh"
source "$(git rev-parse --show-toplevel)/scripts/lib.sh"
# Initialize temp directory when needed
# Initialize temp directory when needed
__init_temp_dir() {
  # Lazily create and export the shared test temp directory; no-op when
  # TEMP_TEST_DIR is already set.
  if [[ -z ${TEMP_TEST_DIR} ]]; then
    TEMP_TEST_DIR="$(create_temp_dir)"
    export TEMP_TEST_DIR
  fi
}
#==============================================================================
# Test Framework Variables
#==============================================================================
# Aggregate counters for the final report (see __report_results).
TEST_PASS=0
TEST_FAIL=0
TEST_SKIP=0
TEST_START_TIME=""
# Test configuration
# Respect values already exported by the caller/environment.
TEST_VERBOSE=${TEST_VERBOSE:-false}
TEST_CONTINUE_ON_FAILURE=${TEST_CONTINUE_ON_FAILURE:-true}
#==============================================================================
# Framework Architecture Notes
#==============================================================================
#
# KEY IMPROVEMENTS IMPLEMENTED:
#
# 1. Library Loading Reliability:
# - Multiple fallback paths for lib.sh loading
# - Works from both project root and scripts/ directory
# - Provides clear error messages if lib.sh cannot be found
#
# 2. Variable Management:
# - SCRIPT_PATH is preserved from test script initialization
# - Only initializes variables if not already set
# - Prevents test framework from overriding test script settings
#
# 3. Arithmetic Operations:
# - All increment operations use "|| true" pattern
# - Compatible with bash "set -e" error handling
# - Prevents premature script termination on arithmetic operations
#
# 4. Script Name Detection:
# - Iterates through BASH_SOURCE array to find actual test script
# - Skips test_lib.sh to show correct script name in headers
# - Provides accurate test identification in output
#
# 5. Resource Management:
# - Lazy initialization of temporary directories
# - Only creates temp resources when actually needed
# - Proper cleanup handling with trap functions
#
# 6. Test Organization:
# - Function-based test structure (run_tests pattern)
# - Clear separation of framework initialization and test execution
# - Standardized test case and section patterns
#
#==============================================================================
# Test Environment Setup
#==============================================================================
__setup_test_env() {
  # Record the start time, create the temp dir, and arm the cleanup trap.
  TEST_START_TIME=$(date +%s)
  __init_temp_dir
  trap '__cleanup_test_env' EXIT
  log_debug "Test environment setup complete"
  log_debug "Temporary directory: ${TEMP_TEST_DIR}"
}
__cleanup_test_env() {
  # EXIT trap: print the summary, remove the temp dir, preserve exit status.
  local exit_code=$?
  __report_results
  if [[ -n ${TEMP_TEST_DIR} && -d ${TEMP_TEST_DIR} ]]; then
    safe_remove "${TEMP_TEST_DIR}"
    log_debug "Test environment cleanup complete"
  fi
  # NOTE(review): this re-exits with the script's original status, ignoring
  # __report_results' return — confirm failed tests should not force a
  # nonzero exit when the script body itself succeeded.
  exit "${exit_code}"
}
__setup() {
  # Parse framework flags, print a header naming the real test script,
  # and prepare the test environment (temp dir + EXIT trap).
  __parse_test_args "$@"
  # Find the main test script that sourced us (skip test_lib.sh itself)
  local script_name=""
  for ((i = 1; i < ${#BASH_SOURCE[@]}; i++)); do
    if [[ ${BASH_SOURCE[i]} != *"test_lib.sh" ]]; then
      script_name=$(basename "${BASH_SOURCE[i]}")
      break
    fi
  done
  print_header "Running ${script_name} tests"
  echo ""
  __setup_test_env
}
#==============================================================================
# Test Execution Functions
#==============================================================================
test_case() {
  # Run SCRIPT_PATH with the given args and assert on exit status plus an
  # output substring.
  # Arguments:
  #   $1 name             Human-readable test name
  #   $2 args             Arguments appended to SCRIPT_PATH
  #   $3 expected_output  Substring expected in combined stdout+stderr
  #   $4 should_succeed   "true" (default) expects exit 0; "false" nonzero
  local name="$1"
  local args="$2"
  local expected_output="$3"
  local should_succeed="${4:-true}"
  # Disable exit-on-error for test execution
  set +e
  echo -n "* ${name}... "
  # Fix: original referenced undefined ${COMMAND}; the command under test
  # is ${SCRIPT_PATH}. Also removed a dead shorthand `if` that assigned
  # expected_output="" only when it was already empty (a no-op).
  [[ ${TEST_VERBOSE} == true ]] && echo -n "(${SCRIPT_PATH} ${args}) "
  local output
  local exit_code=0
  # Capture both stdout and stderr; the eval happens once for both branches.
  local cmd="${SCRIPT_PATH} ${args}"
  output=$(eval "${cmd}" 2>&1)
  exit_code=$?
  if [[ ${should_succeed} == "true" ]]; then
    if [[ ${exit_code} -eq 0 && ${output} == *"${expected_output}"* ]]; then
      __test_pass "${name}"
    else
      __test_fail "${name}" "Success with output containing '${expected_output}'" "Exit code ${exit_code} with output: '${output}'"
    fi
  else
    if [[ ${exit_code} -ne 0 && ${output} == *"${expected_output}"* ]]; then
      __test_pass "${name}"
    else
      __test_fail "${name}" "Failure with output containing '${expected_output}'" "Exit code ${exit_code} with output: '${output}'"
    fi
  fi
  set -e # Restore exit-on-error
}
__test_pass() {
  # Record and print a passing test; `|| true` keeps `set -e` from exiting
  # when the post-increment expression evaluates to 0.
  local name="$1"
  echo -e "${GREEN}PASS${NC}"
  ((TEST_PASS++)) || true
  [[ ${TEST_VERBOSE} == true ]] && log_debug "Test passed: ${name}"
}
__test_fail() {
  # Record and print a failing test with expected/actual details; aborts
  # the whole run when TEST_CONTINUE_ON_FAILURE is disabled.
  local name="$1"
  local expected="$2"
  local actual="$3"
  echo -e "${RED}FAIL${NC}"
  ((TEST_FAIL++)) || true
  if [[ -n ${expected} ]]; then
    echo " Expected : ${expected}"
  fi
  if [[ -n ${actual} ]]; then
    echo " Actual : ${actual}"
  fi
  if [[ ${TEST_CONTINUE_ON_FAILURE} != true ]]; then
    __report_results
    exit 1
  fi
}
#==============================================================================
# Advanced Test Functions
#==============================================================================
test_file_exists() {
  # Assert that the given path names an existing regular file.
  local name="$1"
  local file_path="$2"
  echo -n "* ${name}... "
  set +e
  if file_exists "${file_path}"; then
    __test_pass "${name}"
  else
    __test_fail "${name}" "File should exist: file_path='${file_path}'" "File does not exist"
  fi
  set -e
}
test_file_contains() {
  # Assert that file $2 exists and that grep finds pattern $3 in it.
  local name="$1"
  local file_path="$2"
  local expected_content="$3"
  echo -n "* ${name}... "
  # Match test_file_exists: suspend exit-on-error during assertions.
  set +e
  if ! file_exists "${file_path}"; then
    __test_fail "${name}" "File should exist and contain '${expected_content}'" "File does not exist: ${file_path}"
    # Fix: without this return the missing file was also grepped below,
    # emitting a second PASS/FAIL line for the same test case.
    set -e
    return
  fi
  if grep -q "${expected_content}" "${file_path}"; then
    __test_pass "${name}"
  else
    local file_content
    file_content=$(cat "${file_path}")
    __test_fail "${name}" "File should contain '${expected_content}'" "File content: ${file_content}"
  fi
  set -e
}
#==============================================================================
# Test Utilities
#==============================================================================
create_test_file() {
  # Write $2 to file $1 (creating parent dirs) and chmod to $3 (default 644).
  local file_path="$1"
  local content="$2"
  local mode="${3:-644}"
  local dir_path
  dir_path=$(dirname "${file_path}")
  ensure_dir "${dir_path}"
  echo "${content}" >"${file_path}"
  chmod "${mode}" "${file_path}"
  log_debug "Created test file: ${file_path}"
}
#==============================================================================
# Test Organization Helpers
#==============================================================================
test_section() {
  # Print a standard section banner for a group of related test cases.
  local section_name="$1"
  print_section "Testing section: ${section_name}"
}
test_group() {
  # Print a cyan group banner (coarser-grained than test_section).
  local group_name="$1"
  echo_color cyan "Testing group: ${group_name}"
  echo ""
}
#==============================================================================
# Test Reporting
#==============================================================================
__report_results() {
  # Print the run summary (duration, totals, colored pass/fail/skip counts);
  # returns 0 when no test failed, 1 otherwise.
  local end_time
  end_time=$(date +%s)
  local duration=$((end_time - TEST_START_TIME))
  echo
  print_section "Test Results Summary"
  echo "Duration : ${duration}s"
  echo "Total : $((TEST_PASS + TEST_FAIL))"
  # Color the counts only when they are nonzero.
  if [[ ${TEST_PASS} -gt 0 ]]; then
    echo -e "Passed : ${GREEN}${TEST_PASS}${NC}"
  else
    echo -e "Passed : ${TEST_PASS}"
  fi
  if [[ ${TEST_FAIL} -gt 0 ]]; then
    echo -e "Failed : ${RED}${TEST_FAIL}${NC}"
  else
    echo -e "Failed : ${TEST_FAIL}"
  fi
  if [[ ${TEST_SKIP} -gt 0 ]]; then
    echo -e "Skipped : ${YELLOW}${TEST_SKIP}${NC}"
  fi
  echo
  if [[ ${TEST_FAIL} -eq 0 ]]; then
    log_success "All tests passed!"
    return 0
  else
    log_error "${TEST_FAIL} test(s) failed"
    return 1
  fi
}
#==============================================================================
# Common Test Patterns
#==============================================================================
__test_help_option() {
  # Standard checks that the script under test prints USAGE for -h/--help.
  # Fix: test_case already builds "${SCRIPT_PATH} ${args}", so passing the
  # script path inside args duplicated the command; pass only the flag.
  # The $1 parameter is accepted for backward compatibility but unused.
  test_case "help option (-h)" \
    "-h" \
    "USAGE" \
    true
  test_case "help option (--help)" \
    "--help" \
    "USAGE" \
    true
}
__test_invalid_arguments() {
  # Standard check that an unknown flag fails with an error message.
  # Fix: do not duplicate the script path — test_case prefixes SCRIPT_PATH.
  # The $1 parameter is accepted for backward compatibility but unused.
  test_case "invalid option" \
    "--invalid-option" \
    "error" \
    false
}
#==============================================================================
# Initialization
#==============================================================================
# Default test argument parsing
__parse_test_args() {
  # Consume framework flags (-v/--verbose, --stop-on-failure, -h/--help);
  # stops at the first unrecognized argument.
  while [[ $# -gt 0 ]]; do
    case $1 in
      -v | --verbose)
        export TEST_VERBOSE=true
        export VERBOSE=true
        ;;
      --stop-on-failure)
        export TEST_CONTINUE_ON_FAILURE=false
        ;;
      -h | --help)
        # Use the sourcing script's help function if it defines one.
        [[ $(type -t show_help) == function ]] && show_help
        exit 0
        ;;
      *)
        break
        ;;
    esac
    shift
  done
}
# Function to start the testing framework
start_tests() {
  # Public entry point for test scripts: flags, header, env setup.
  __setup "$@"
}
# Refuse direct execution; this library only works when sourced.
if [[ ${BASH_SOURCE[0]} == "${0}" ]]; then
  echo "This script should be sourced, not executed directly."
  # shellcheck disable=SC2016
  echo 'Usage: source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"/test_lib.sh'
  exit 1
fi
__get_script_path_dynamic() {
  # Derive the script under test from the calling *_test.sh filename:
  # foo_test.sh -> <root>/scripts/foo.sh; fail()s when nothing matches.
  local test_filepath
  local root_path
  local script_filename
  local script_filepath
  # Try multiple ways of locating the test script to support different invocation styles
  # Find the calling test script (first BASH_SOURCE entry that ends with _test.sh)
  test_filepath=""
  for ((i = 1; i < ${#BASH_SOURCE[@]}; i++)); do
    if [[ ${BASH_SOURCE[i]} == *_test.sh ]]; then
      test_filepath="${BASH_SOURCE[i]}"
      break
    fi
  done
  # Fallbacks if not found
  test_filepath="${test_filepath:-${BASH_SOURCE[1]:-${BASH_SOURCE[0]:-$0}}}"
  root_path="$(get_project_root 2>/dev/null || pwd)"
  [[ ${TEST_VERBOSE} == true ]] && echo "DEBUG: test_filepath=${test_filepath} root_path=${root_path}" >&2
  # Map the _test.sh name onto the production script name.
  script_filename="$(basename "${test_filepath}" | sed 's/_test.sh/.sh/g')"
  script_filepath="${root_path}/scripts/${script_filename}"
  if [[ -f ${script_filepath} ]]; then
    log_debug "Script path successfully found dynamically ${script_filepath}"
    echo "${script_filepath}"
    return 0
  fi
  # Fallback: search scripts/ for a matching script name
  if [[ -d "${root_path}/scripts" ]]; then
    local found
    found=$(find "${root_path}/scripts" -maxdepth 1 -type f -name "${script_filename}" -print -quit 2>/dev/null || true)
    if [[ -n ${found} ]]; then
      log_debug "Script path found via fallback: ${found}"
      echo "${found}"
      return 0
    fi
  fi
  fail "Script file not found: ${script_filepath}; set SCRIPT_PATH before sourcing test_lib.sh"
}
# Will be set by the test script - only initialize if not already set
# NOTE(review): under `set -u` the bare ${SCRIPT_PATH} expansion errors when
# the variable was never declared — confirm callers always define it, or use
# ${SCRIPT_PATH:-} here.
[[ -z ${SCRIPT_PATH} ]] && SCRIPT_PATH="$(__get_script_path_dynamic)"
export SCRIPT_PATH
[[ -z ${TEMP_TEST_DIR} ]] && TEMP_TEST_DIR=""
export TEMP_TEST_DIR

View file

@ -1,73 +0,0 @@
#!/bin/bash
#==============================================================================
# update_md_tocs.sh
#==============================================================================
#
# DESCRIPTION:
# Automatically updates table of contents in all markdown files that contain
# doctoc markers. The script handles installation of doctoc if not present
# and applies consistent formatting across all markdown files.
#
# USAGE:
# update_md_tocs.sh [OPTIONS]
#
# FEATURES:
# - Auto-detects markdown files with doctoc markers
# - Installs doctoc if not present (requires npm)
# - Applies consistent settings across all files:
# * Excludes document title
# * Includes headers up to level 4
# * Uses GitHub-compatible links
# - Provides clear progress and error feedback
#
# TO ADD TOC TO A NEW FILE:
# Add these markers to your markdown:
# <!-- START doctoc generated TOC please keep comment here to allow auto update -->
# <!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
# <!-- doctoc --maxlevel 4 --no-title --notitle --github -->
#
# <!-- END doctoc -->
#
# DEPENDENCIES:
# - npm (for doctoc installation if needed)
# - doctoc (will be installed if missing)
#
# EXIT CODES:
# 0 - Success
# 1 - Missing dependencies or installation failure
#
# NOTES:
# - Only processes files containing doctoc markers
# - Preserves existing markdown content
# - Safe to run multiple times
#==============================================================================
source "$(git rev-parse --show-toplevel)/scripts/lib.sh"

# Ensure doctoc is available, installing it globally via npm when missing.
if ! command_exists doctoc; then
  echo "doctoc not found. Installing..."
  command_exists npm || {
    echo "Error: npm is required to install doctoc"
    exit 1
  }
  if ! npm_package_installed doctoc; then
    echo "Installing doctoc globally..."
    npm install -g doctoc || fail "Failed to install doctoc"
  fi
fi

print_status "Updating table of contents in markdown files..."

# Regenerate the TOC in every markdown file that carries doctoc markers.
# NOTE(review): doctoc documents --notitle; the extra --no-title flag looks
# redundant -- confirm against the doctoc CLI before removing either.
find . -type f -name "*.md" -exec grep -l "START doctoc" {} \; | while read -r md_file; do
  log_info "Processing: ${md_file}"
  doctoc --maxlevel 4 --no-title --notitle --github "${md_file}" ||
    log_error "Failed to update TOC in ${md_file}"
done

print_success "Table of contents update complete!"

View file

@ -1,23 +0,0 @@
#!/bin/bash
#==============================================================================
# update_trunkio.sh
#==============================================================================
#
# DESCRIPTION:
# Configures and updates the TrunkIO extension.
#
# USAGE:
# update_trunkio.sh
#==============================================================================
# Load shared helpers (logging, command checks, etc.) from the repo root.
source "$(git rev-parse --show-toplevel)/scripts/lib.sh"
# Upgrade the trunk CLI and its managed tooling.
trunk upgrade
# List configured checks with --fix/--print-failures flags.
# NOTE(review): `trunk check list` normally only enumerates linters — confirm
# these flags have an effect here, or whether `trunk check` was intended.
trunk check list --fix --print-failures
# TODO: Automatically enable any disabled linters except for cspell
# DISABLED_LINTERS="$(trunk check list | grep '◯' | grep "files" | awk -F ' ' '{print $2}')"
# for linter in $DISABLED_LINTERS; do
#   echo "trunk check enable $linter;"
# done