This commit is contained in:
awalsh128 2025-09-11 15:35:18 -07:00
parent 07366a6d1e
commit c143000184
89 changed files with 5481 additions and 3173 deletions

37
.editorconfig Normal file
View file

@ -0,0 +1,37 @@
# EditorConfig is awesome: https://EditorConfig.org
# top-most EditorConfig file
root = true
# Unix-style newlines with a newline ending every file
[*]
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
charset = utf-8
# 2 space indentation
[*.{js,json,jsonc,yml,yaml,md,sh}]
indent_style = space
indent_size = 2
# Tab indentation (displayed as 4 columns)
[*.go]
indent_style = tab
indent_size = 4
# Matches the exact files
[{Makefile,makefile,*.mk}]
indent_style = tab
indent_size = 4
# Shell scripts
[*.sh]
indent_style = space
indent_size = 2
max_line_length = 100
# Markdown files
[*.md]
max_line_length = 100
trim_trailing_whitespace = false

2
.env
View file

@ -1 +1,3 @@
GO111MODULE=auto
GO_TOOLCHAIN=go1.23.5
GO_VERSION=1.23.5

View file

@ -1,63 +0,0 @@
---
name: Bug Report
about: Create a report to help us improve or fix the action
title: "[BUG] "
labels: bug
assignees: 'awalsh128'
---
> **Note**: Please read about the limitation of [non-file dependencies](https://github.com/awalsh128/cache-apt-pkgs-action/blob/master/README.md#non-file-dependencies) before filing an issue.
## Description
A clear and concise description of what the bug is.
## Steps to Reproduce
### 1. Workflow Configuration
```yaml
# Replace with your workflow
```
### 2. Package List
```plaintext
# List your packages here
```
### 3. Environment Details
- Runner OS: [e.g., Ubuntu 22.04]
- Action version: [e.g., v2.0.0]
## Expected Behavior
A clear and concise description of what you expected to happen.
## Actual Behavior
What actually happened? Please include:
- Error messages
- Action logs
- Cache status (hit/miss)
## Debug Information
If possible, please run the action with debug mode enabled:
```yaml
with:
debug: true
```
And provide the debug output.
## Additional Context
- Are you using any specific package versions?
- Are there any special package configurations?
- Does the issue happen consistently or intermittently?
- Have you tried clearing the cache and retrying?

71
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View file

@ -0,0 +1,71 @@
---
name: Bug Report
about: Create a report to help us improve or fix the action
title: "[BUG] "
labels: bug
assignees: awalsh128
---
## Bug Description
A clear and concise description of what the bug is.
## Reproduction Steps
### Workflow Configuration
```yaml
# Paste your workflow configuration here
steps:
- name: Cache apt packages
uses: awalsh128/cache-apt-pkgs-action@latest
with:
packages: # your packages
version: 1.0
```
### Package List
```txt
# List the packages you're trying to cache
# Example: curl wget git
```
### Environment
- **Runner OS**: (e.g., ubuntu-22.04, ubuntu-20.04)
- **Action version**: (e.g., v1.4.2, latest)
- **Repository**: (if relevant)
## Expected vs Actual Behavior
**Expected**: What you expected to happen
**Actual**: What actually happened
## Logs and Error Messages
```txt
# Paste relevant logs, error messages, or debug output here
# Enable debug mode by adding: debug: true to your workflow step
```
## Cache Status
- [ ] Cache hit
- [ ] Cache miss
- [ ] Cache creation failed
- [ ] Other (please specify)
## Additional Information
- Does this happen consistently or intermittently?
- Have you tried clearing the cache?
- Are you using any specific package versions or configurations?
- Any relevant system dependencies?
## Checklist
- [ ] I have read the [non-file dependencies limitation](https://github.com/awalsh128/cache-apt-pkgs-action/blob/master/README.md#non-file-dependencies)
- [ ] I have searched existing issues for duplicates
- [ ] I have provided all requested information above

8
.github/ISSUE_TEMPLATE/config.yml vendored Normal file
View file

@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
- name: Documentation
url: https://github.com/awalsh128/cache-apt-pkgs-action/blob/master/README.md
about: Please check the documentation before filing an issue
- name: Discussions
url: https://github.com/awalsh128/cache-apt-pkgs-action/discussions
about: Ask questions and discuss ideas with the community

View file

@ -0,0 +1,55 @@
---
name: Feature Request
about: Suggest an idea or enhancement for this action
title: "[FEATURE] "
labels: enhancement
assignees: awalsh128
---
## Feature Summary
A clear and concise description of what you want to happen.
## Problem Statement
What problem would this feature solve? Is your feature request related to a problem you're experiencing?
```txt
Example: I'm frustrated when [specific scenario] because [reason]
```
## Proposed Solution
Describe the solution you'd like to see implemented.
## Alternatives Considered
Describe any alternative solutions or features you've considered.
## Use Case
Describe your specific use case and how this feature would benefit you and others.
```yaml
# Example workflow showing how the feature would be used
steps:
- name: Cache apt packages
uses: awalsh128/cache-apt-pkgs-action@latest
with:
packages: curl wget
# new-feature: value
```
## Implementation Notes
If you have ideas about how this could be implemented, please share them here.
## Additional Context
Add any other context, screenshots, or examples about the feature request here.
## Checklist
- [ ] I have searched existing issues and discussions for similar requests
- [ ] This feature would benefit the broader community, not just my specific use case
- [ ] I have provided a clear use case and rationale

View file

@ -1,45 +1,31 @@
name: Test Action
permissions:
contents: read
env:
DEBUG: false
on:
# Manual trigger with specific ref
# Manual trigger (no inputs allowed per Trunk rule)
workflow_dispatch:
inputs:
ref:
description: Branch, tag, SHA to test (for PRs use pull/ID/head)
required: true
type: string
debug:
description: Enable debug logging
type: boolean
required: false
default: false
# Automatic triggers
push:
branches: [dev-v2.0] # Test on pushes to dev branch
branches: [dev-v2.0] # Test on pushes to dev branch
paths:
- cmd/** # Only when action code changes
- internal/** # Only when action code changes
- cmd/** # Only when action code changes
- internal/** # Only when action code changes
- action.yml
- .github/workflows/action_tests.yml
pull_request:
branches: [dev-v2.0] # Test on PRs to dev branch
branches: [dev-v2.0] # Test on PRs to dev branch
paths:
- cmd/** # Only when action code changes
- internal/** # Only when action code changes
- cmd/** # Only when action code changes
- internal/** # Only when action code changes
- action.yml
- .github/workflows/action_tests.yml
# Environment configuration
env:
DEBUG: ${{ github.event.inputs.debug || false }}
# Test for overrides in built in shell options (regression issue 98).
SHELLOPTS: errexit:pipefail
# Use PR's SHA when testing a PR, otherwise use the ref provided
TEST_REF: ${{ github.event.pull_request.head.sha || github.event.inputs.ref || github.ref }}
jobs:
jobs:
list_all_versions:
runs-on: ubuntu-latest
name: List all package versions (including deps).
@ -48,8 +34,9 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
with:
ref: ${{ env.TEST_REF }}
# Use the event ref/sha by default; do not accept user-controlled ref inputs
fetch-depth: 0
# Run the action from the checked out code
- name: Execute
id: execute
@ -57,7 +44,7 @@ jobs:
with:
packages: xdot=1.3-1
version: ${{ github.run_id }}-${{ github.run_attempt }}-list_all_versions
debug: ${{ env.DEBUG }}
debug: "false"
- name: Verify
if: |
steps.execute.outputs.cache-hit != 'false' ||
@ -82,8 +69,8 @@ jobs:
with:
packages: xdot rolldice
version: ${{ github.run_id }}-${{ github.run_attempt }}-list_versions
debug: ${{ env.DEBUG }}
- name: Verify
debug: "false"
- name: Verify
if: steps.execute.outputs.cache-hit != 'false' || steps.execute.outputs.package-version-list != 'rolldice=1.16-1build3,xdot=1.3-1'
run: |
echo "cache-hit = ${{ steps.execute.outputs.cache-hit }}"
@ -104,7 +91,7 @@ jobs:
with:
packages: xdot rolldice
version: ${{ github.run_id }}-${{ github.run_attempt }}-standard_workflow
debug: ${{ env.DEBUG }}
debug: "false"
- name: Verify
if: steps.execute.outputs.cache-hit != 'false'
run: |
@ -124,7 +111,7 @@ jobs:
with:
packages: xdot rolldice
version: ${{ github.run_id }}-${{ github.run_attempt }}-standard_workflow_install_with_new_version
debug: ${{ env.DEBUG }}
debug: "false"
- name: Verify
if: steps.execute.outputs.cache-hit != 'false'
run: |
@ -290,7 +277,7 @@ jobs:
with:
packages: xdot
version: ${{ github.run_id }}-${{ github.run_attempt }}-list-all-package-versions
debug: ${{ env.DEBUG }}
debug: false
regression_72_1:
runs-on: ubuntu-latest
@ -496,4 +483,4 @@ jobs:
with:
packages: libvips
version: ${{ github.run_id }}-${{ github.run_attempt }}-virtual_package
debug: ${{ env.DEBUG }}
debug: ${{ env.DEBUG }}

View file

@ -1,36 +1,36 @@
name: CI
permissions:
contents: read
on:
push:
branches: [dev-v2.0]
tags: ['v*'] # Trigger on version tags
tags: [v*] # Trigger on version tags
pull_request:
branches: [dev-v2.0]
schedule:
- cron: 0 0 * * * # Run at 00:00 UTC every day
- cron: 0 0 * * * # Run at 00:00 UTC every day
workflow_dispatch:
inputs:
debug:
description: Run in debug mode.
type: boolean
required: false
default: false
env:
DEBUG: ${{ github.event.inputs.debug || false }}
DEBUG: false
SHELLOPTS: errexit:pipefail
jobs:
build:
runs-on: ubuntu-latest
steps:
steps:
- uses: actions/checkout@v4
- name: Install Go
uses: actions/setup-go@v5
- name: Read Go version from .env
id: go-version
run: |
echo "go_version=$(grep '^GO_VERSION=' .env | cut -d '=' -f2)" >> "$GITHUB_OUTPUT"
- uses: actions/setup-go@v5
with:
go-version: "1.21"
go-version: ${{ steps.go-version.outputs.go_version }}
cache: true
- name: Install Go module dependencies
@ -38,19 +38,6 @@ jobs:
- name: Build
run: go build -v ./...
- name: Check file encodings
run: |
./scripts/check_utf8.sh
- name: Check file encoding changes
id: git-check
run: |
if [[ -n "$(git status --porcelain)" ]]; then
echo "::error::Some files are not in UTF-8 encoding. Please run ./scripts/check_utf8.sh locally and commit the changes."
git status --porcelain
exit 1
fi
- name: trunk.io Lint
uses: trunk-io/trunk-action@v1
@ -113,6 +100,3 @@ jobs:
fi
done
fi

45
.github/workflows/common.yml vendored Normal file
View file

@ -0,0 +1,45 @@
on:
workflow_dispatch:
permissions:
contents: read
pull-requests: write
jobs:
# TODO finish debugging check_and_fix_env.sh to start using this
modify-pr:
runs-on: ubuntu-latest
name: Check and fix PR
env:
REQUIRES_FIX: ""
SAFE_HEAD_REF: ${{ github.head_ref }}
steps:
- name: Checkout PR branch
uses: actions/checkout@v4
with:
ref: ${{ env.SAFE_HEAD_REF }}
- name: Check and fix if needed
shell: bash
run: |
set -euo pipefail
# Capture the exit status without tripping errexit: a bare `cmd; status=$?`
# would abort the step on any non-zero exit before status is recorded.
status=0
./scripts/check_and_fix_env.sh || status=$?
echo "REQUIRES_FIX=$status" >> "$GITHUB_ENV"
if [[ "$status" != "0" ]]; then
echo "$status changes were made, applying fix."
fi
- name: Commit and push changes
shell: bash
if: env.REQUIRES_FIX != '0'
run: |
set -euo pipefail
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
git add .
git commit -m "Automated update from \"Check and fix PR\" workflow" || echo "No changes to commit"
git push origin HEAD:"${SAFE_HEAD_REF}"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SAFE_HEAD_REF: ${{ env.SAFE_HEAD_REF }}

View file

@ -1,14 +1,12 @@
name: Release Version
permissions:
contents: read
on:
push:
tags: ['v2.*'] # Trigger on version tags >= 2.0.0
workflow_dispatch:
inputs:
version:
description: 'Version tag to update on pkg.go.dev (e.g. v2.0.0)'
required: true
type: string
tags: [v2.*] # Trigger on version tags >= 2.0.0
# manual dispatch removed to comply with Trunk rule (use push tags instead)
jobs:
update-pkg-go-dev:
@ -16,7 +14,7 @@ jobs:
steps:
- name: Update pkg.go.dev
run: |
VERSION="${{ github.ref_name || github.sha }}"
echo "Updating pkg.go.dev for version $VERSION"
curl -i "https://proxy.golang.org/github.com/awalsh128/cache-apt-pkgs-action/@v/${VERSION}.info"
# Trigger a package load

6
.gitignore vendored
View file

@ -1,8 +1,8 @@
src/cmd/apt_query/apt_query*
# Environment files
.env.local
.env.*.local
# Don't ignore the main .env file
!.env
!.env
scripts/sandbox.sh

View file

@ -1,87 +1,81 @@
version: "2"
formatters:
enable:
- gofumpt # formats Go code
- goimports # formats imports and does everything that gofmt does
linters:
enable:
- asasalint # checks for pass []any as any in variadic func(...any)
- asciicheck # checks that your code does not contain non-ASCII identifiers
- bidichk # checks for dangerous unicode character sequences
- bodyclose # checks whether HTTP response body is closed successfully
- containedctx # detects struct contained context.Context field
- contextcheck # checks the function whether use a non-inherited context
- cyclop # checks function and package cyclomatic complexity
- decorder # checks declaration order and count of types, constants, variables and functions
- dogsled # checks assignments with too many blank identifiers
- dupl # checks code clone duplication
- asasalint # checks for pass []any as any in variadic func(...any)
- asciicheck # checks that your code does not contain non-ASCII identifiers
- bidichk # checks for dangerous unicode character sequences
- bodyclose # checks whether HTTP response body is closed successfully
- containedctx # detects struct contained context.Context field
- contextcheck # checks the function whether use a non-inherited context
- cyclop # checks function and package cyclomatic complexity
- decorder # checks declaration order and count of types, constants, variables and functions
- dogsled # checks assignments with too many blank identifiers
- dupl # checks code clone duplication
- durationcheck # checks for two durations multiplied together
- errcheck # checks unchecked errors
- errchkjson # checks types passed to encoding/json functions
- errname # checks that sentinel errors are prefixed with the Err and error types are suffixed with the Error
- errorlint # finds code that will cause problems with the error wrapping scheme
- execinquery # checks query string in Query function which reads your Go src files and warning it finds
- exhaustive # checks exhaustiveness of enum switch statements
- exportloopref # checks for pointers to enclosing loop variables
- errcheck # checks unchecked errors
- errchkjson # checks types passed to encoding/json functions
- errname # checks that sentinel errors are prefixed with the Err and error types are suffixed with the Error
- errorlint # finds code that will cause problems with the error wrapping scheme
- exhaustive # checks exhaustiveness of enum switch statements
- forcetypeassert # finds forced type assertions
- funlen # checks for long functions
- funlen # checks for long functions
- gocheckcompilerdirectives # validates go compiler directive comments
- gochecknoglobals # checks that no global variables exist
- gochecknoinits # checks that no init functions are present
- gocognit # computes and checks the cognitive complexity
- goconst # finds repeated strings that could be replaced by a constant
- gocritic # provides diagnostics that check for bugs, performance and style issues
- gocyclo # checks cyclomatic complexity
- godot # checks if comments end in a period
- godox # detects FIXME, TODO and other comment keywords
- goerr113 # checks the errors handling expressions
- gofmt # checks whether code was gofmt-ed
- gofumpt # checks whether code was gofumpt-ed
- goheader # checks is file header matches to pattern
- goimports # does everything that gofmt does + formats imports
- gomnd # detects magic numbers
- gochecknoinits # checks that no init functions are present
- gocognit # computes and checks the cognitive complexity
- goconst # finds repeated strings that could be replaced by a constant
- gocritic # provides diagnostics that check for bugs, performance and style issues
- gocyclo # checks cyclomatic complexity
- godot # checks if comments end in a period
- godox # detects FIXME, TODO and other comment keywords
- goheader # checks if the file header matches the pattern
- gomoddirectives # manages the use of 'replace', 'retract', and 'excludes' directives in go.mod
- gomodguard # allows to specify a list of forbidden modules
- gomodguard # allows to specify a list of forbidden modules
- goprintffuncname # checks that printf-like functions are named with f at the end
- gosec # inspects source code for security problems
- gosimple # specializes in simplifying code
- govet # reports suspicious constructs
- grouper # analyzes expression groups
- importas # enforces consistent import aliases
- ineffassign # detects when assignments to existing variables are not used
- gosec # inspects source code for security problems
- govet # reports suspicious constructs
- grouper # analyzes expression groups
- importas # enforces consistent import aliases
- ineffassign # detects when assignments to existing variables are not used
- interfacebloat # checks the number of methods inside an interface
- ireturn # accept interfaces, return concrete types
- lll # reports long lines
- loggercheck # checks key value pairs for common logger libraries
- maintidx # measures the maintainability index of each function
- makezero # finds slice declarations with non-zero initial length
- misspell # finds commonly misspelled English words
- nakedret # finds naked returns
- nestif # reports deeply nested if statements
- nilerr # finds the code that returns nil even if it checks that error is not nil
- nilnil # checks that there is no simultaneous return of nil error and an invalid value
- nlreturn # checks for a new line before return and branch statements
- noctx # finds sending http request without context.Context
- nolintlint # reports ill-formed or insufficient nolint directives
- ireturn # accept interfaces, return concrete types
- lll # reports long lines
- loggercheck # checks key value pairs for common logger libraries
- maintidx # measures the maintainability index of each function
- makezero # finds slice declarations with non-zero initial length
- misspell # finds commonly misspelled English words
- nakedret # finds naked returns
- nestif # reports deeply nested if statements
- nilerr # finds the code that returns nil even if it checks that error is not nil
- nilnil # checks that there is no simultaneous return of nil error and an invalid value
- nlreturn # checks for a new line before return and branch statements
- noctx # finds sending http request without context.Context
- nolintlint # reports ill-formed or insufficient nolint directives
- nonamedreturns # reports all named returns
- nosprintfhostport # checks for misuse of Sprintf to construct a host with port in a URL
- paralleltest # detects missing usage of t.Parallel() method in your Go test
- prealloc # finds slice declarations that could potentially be pre-allocated
- prealloc # finds slice declarations that could potentially be pre-allocated
- predeclared # finds code that shadows one of Go's predeclared identifiers
- promlinter # checks Prometheus metrics naming via promlint
- reassign # checks that package variables are not reassigned
- revive # fast, configurable, extensible, flexible, and beautiful linter for Go
- promlinter # checks Prometheus metrics naming via promlint
- reassign # checks that package variables are not reassigned
- revive # fast, configurable, extensible, flexible, and beautiful linter for Go
- rowserrcheck # checks whether Err of rows is checked successfully
- sqlclosecheck # checks that sql.Rows and sql.Stmt are closed
- staticcheck # comprehensive checks for bugs and inefficiencies
- stylecheck # replacement for golint
- tenv # detects using os.Setenv instead of t.Setenv since Go1.17
- staticcheck # comprehensive checks for bugs and inefficiencies
- testableexamples # checks if examples are testable
- testpackage # makes you use a separate _test package
- thelper # detects golang test helpers without t.Helper()
- tparallel # detects inappropriate usage of t.Parallel()
- typecheck # like the front-end of a Go compiler
- unconvert # removes unnecessary type conversions
- unparam # reports unused function parameters
- unused # checks for unused constants, variables, functions and types
- testpackage # makes you use a separate _test package
- thelper # detects golang test helpers without t.Helper()
- tparallel # detects inappropriate usage of t.Parallel()
- unconvert # removes unnecessary type conversions
- unparam # reports unused function parameters
- unused # checks for unused constants, variables, functions and types
- usestdlibvars # detects the possibility to use variables/constants from the Go standard library
- varnamelen # checks that the length of a variable's name matches its scope
- varnamelen # checks that the length of a variable's name matches its scope
- wastedassign # finds wasted assignment statements
- whitespace # detects leading and trailing whitespace
- whitespace # detects leading and trailing whitespace

View file

@ -1,2 +1,54 @@
# Prettier friendly markdownlint config (all formatting rules disabled)
# Enable all rules by default
default: true
# Markdown linting configuration with all rules enabled
extends: markdownlint/style/prettier
# MD003 heading-style - Header style
MD003:
style: atx # Use # style headers
# MD004 ul-style - Unordered list style
MD004:
style: consistent # Be consistent with the first list style used
# MD012 no-multiple-blanks - No multiple consecutive blank lines
MD012:
maximum: 1
# MD013 line-length - Line length
MD013:
line_length: 100
code_blocks: false
tables: false
# MD024 no-duplicate-header - No duplicate headers
MD024:
siblings_only: true # Allow duplicates if they're not siblings
# MD026 no-trailing-punctuation - No trailing punctuation in header
MD026:
punctuation: .,;:!。,;:!
# MD029 ol-prefix - Ordered list item prefix
MD029:
style: one_or_ordered
# MD033 no-inline-html - No inline HTML
MD033:
allowed_elements: []
# MD034 no-bare-urls - No bare URLs
MD034: true
# MD035 hr-style - Horizontal rule style
MD035:
style: ---
# MD041 first-line-heading - First line should be a top-level header
MD041:
level: 1
# MD046 code-block-style - Code block style
MD046:
style: fenced

View file

@ -0,0 +1,4 @@
plugins:
remark-preset-lint-consistent: true
remark-preset-lint-recommended: true
remark-lint-list-item-indent: true

View file

@ -0,0 +1,7 @@
enable=all
source-path=SCRIPTDIR
disable=SC1090
disable=SC1091
disable=SC2154
disable=SC2310
disable=SC2312

5
.trunk/configs/.vale.ini Normal file
View file

@ -0,0 +1,5 @@
[formats]
markdoc = md
[*.md]
BasedOnStyles = Vale

View file

@ -0,0 +1,91 @@
# Copyright 2021 Praetorian Security, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# GoKart analyzers configuration
# Uncomment analyzers section below to create a new vulnerability type
# analyzers:
# # Each entry specifies a vulnerability type.
# # Name of the vulnerability:
# Test Sink:
# # Description of this vulnerability
# doc: Writing data to Printf()
# # Message displayed when this vulnerability is found
# message: Test Sink reachable by user input
# # List of vulnerable functions used to identify this vulnerability
# vuln_calls:
# # Package name
# log:
# # Function name
# - Printf
# Each entry specifies a source that should be considered untrusted
# If the package already exists in the sources section, add the variable/function/type underneath
# Each package can contain multiple vulnerable sources.
sources:
# Sources that are defined in Go documentation as a variable go here (note: these variables will have an SSA type of Global).
variables:
os:
- Args
# Sources that are defined in Go documentation as a function go here.
functions:
flag:
- Arg
- Args
os:
- Environ
- File
crypto/tls:
- LoadX509KeyPair
- X509KeyPair
os/user:
- Lookup
- LookupId
- Current
crypto/x509:
- Subjects
io:
- ReadAtLeast
- ReadFull
database/sql:
- Query
- QueryRow
bytes:
- String
- ReadBytes
- ReadByte
bufio:
- Text
- Bytes
- ReadString
- ReadSlice
- ReadRune
- ReadLine
- ReadBytes
- ReadByte
archive/tar:
- Next
- FileInfo
- Header
net/url:
- ParseQuery
- ParseRequestURI
- Parse
- Query
# Sources that are defined in Go documentation as a type go here (note: adding types will consider all functions that use that type to be tainted).
types:
net/http:
- Request

View file

@ -0,0 +1,4 @@
version: "0.2"
# Suggestions can sometimes take longer on CI machines,
# leading to inconsistent results.
suggestionsTimeout: 5000 # ms

View file

@ -3,34 +3,65 @@
version: 0.1
cli:
version: 1.25.0
# Trunk provides extensibility via plugins. (https://docs.trunk.io/plugins)
plugins:
sources:
- id: trunk
ref: v1.7.1
ref: v1.7.2
uri: https://github.com/trunk-io/plugins
# Many linters and tools depend on runtimes - configure them here. (https://docs.trunk.io/runtimes)
runtimes:
enabled:
- node@22.16.0
- python@3.10.8
- go@1.21.0
# This is the section where you manage your linters. (https://docs.trunk.io/check/configuration)
lint:
disabled:
- cspell
enabled:
- yamlfmt@0.17.2
- vale@3.12.0
- trunk-toolbox@0.5.4
- trufflehog-git@3.90.6
- snyk@1.1295.0
- remark-lint@12.0.1
- pre-commit-hooks@6.0.0
- nancy@1.0.51
- markdownlint-cli2@0.18.1
- markdown-table-prettify@3.7.0
- markdown-link-check@3.13.7
- ls-lint@2.3.1
- golangci-lint@1.64.8
- gokart@0.5.1
- goimports@0.9.1
- gofumpt@0.5.0
- gitleaks@8.28.0
- deno@2.5.0
- biome@2.2.4
- codespell@2.4.1
- kube-linter@0.7.2
- golines@0.13.0
- markdownlint@0.45.0
- semgrep@1.136.0
- shellcheck@0.11.0
- actionlint@1.7.7
- checkov@3.2.470
- dotenv-linter@3.3.0
- git-diff-check
- gofmt@1.20.4
- golangci-lint@1.54.2
- shellcheck@0.10.0
- golangci-lint2@2.4.0
- isort@6.0.1
- markdownlint@0.45.0
- osv-scanner@2.2.2
- oxipng@9.1.5
- prettier@3.6.2
- shfmt@3.6.0
- trivy@0.66.0
- trufflehog@3.90.6
- yamllint@1.37.1
- cspell@0.27.1
definitions:
- name: cspell
files: [".md$"]
actions:
enabled:
- trunk-upgrade-available
- trunk-fmt-pre-commit
disabled:
- trunk-announce
- trunk-check-pre-push
# - trunk-fmt-pre-commit
- trunk-upgrade-available

5
.version-info Normal file
View file

@ -0,0 +1,5 @@
# Version information for cache-apt-pkgs-action
GO_VERSION=1.24
TOOLCHAIN_VERSION=
SYSPKG_VERSION=v0.1.5
EXPORT_DATE=2025-09-07 11:53:25

6
.version-info.json Normal file
View file

@ -0,0 +1,6 @@
{
"goVersion": "1.24",
"toolchainVersion": "",
"syspkgVersion": "v0.1.5",
"exportDate": "2025-09-07 11:53:25"
}

248
.vscode/cspell.json vendored
View file

@ -1,248 +0,0 @@
{
"version": "0.2",
"language": "en",
"dictionaries": [
"go",
"softwareTerms"
],
"enableFiletypes": [
"go",
"md",
"yml",
],
"words": [
"arangodb",
"arangolint",
"asasalint",
"asciicheck",
"awalsh",
"Axxxx",
"bidichk",
"bodyclose",
"canonicalheader",
"cli",
"cmdflags",
"cmdtesting",
"codecov",
"containedctx",
"contextcheck",
"copyloopvar",
"covermode",
"coverprofile",
"createreplaylogs",
"cwd",
"cyclo",
"cyclop",
"davecgh",
"DCMAKE",
"decorder",
"depguard",
"difflib",
"dmbeddedstouctfieldcheck",
"dockerdesktop",
"dpkg",
"dupl",
"dupword",
"durationcheck",
"eamodio",
"embeddedstructfieldcheck",
"errcheck",
"errchkjson",
"errexit",
"errname",
"Errorf",
"errorlint",
"erxygen",
"esac",
"Etz",
"exhaustruct",
"exptostd",
"fatcontext",
"fieldalignment",
"fileset",
"finkgulinter",
"fmt",
"folded",
"forbidigo",
"forcetypeassert",
"forcorder",
"funcorder",
"funlen",
"Fyf",
"ginkgolinter",
"gocheckcompilerdirectives",
"gochecknoglobals",
"gochecknoinits",
"gochecksumtype",
"gocognit",
"goconst",
"gocritic",
"gocyclo",
"godot",
"godox",
"gofmt",
"gofrs",
"gofumpt",
"goheader",
"goimports",
"golangci",
"golines",
"golint",
"gomega",
"gomoddirectives",
"gomodguard",
"gonlen",
"gopkg",
"goprintffuncname",
"GOROOT",
"gosec",
"gosmopolitan",
"gostd",
"govet",
"graphviz",
"iface",
"importas",
"inamedparam",
"ineffassign",
"interfacebloat",
"intrange",
"ireturn",
"jmoiron",
"jre",
"kitlog",
"klog",
"libasound",
"libatk",
"libatspi",
"libboost",
"libcups",
"libdrm",
"libfuse",
"libgbm",
"libgl",
"libgtk",
"libnspr",
"libnss",
"libosmesa",
"libtk",
"libvips",
"libxcomposite",
"libxdamage",
"libxfixes",
"libxkbcommon",
"libxrandr",
"loggercheck",
"logr",
"lqez",
"maintidx",
"makezero",
"mapstructure",
"mitchellh",
"mscgen",
"musttag",
"myapp",
"mypackage",
"nakedret",
"nestif",
"nginx",
"nilerr",
"nilness",
"nilnesserr",
"nilnil",
"nlreturn",
"noctx",
"noinlineerr",
"nolint",
"nolintlint",
"nonamedreturns",
"nonexistentpackagename",
"nosprintfhostport",
"oipefail",
"oneapi",
"onndoc",
"paeapi",
"pandoc",
"paralleltest",
"perfsprint",
"pipefail",
"pkgs",
"pmealloc",
"postgresql",
"postinst",
"ppenapi",
"prealloc",
"predeclared",
"preinst",
"prezard",
"promlint",
"promlinter",
"proto",
"protogetter",
"reassign",
"recvcheck",
"redis",
"replayfile",
"replayfilename",
"Reymers",
"rolldice",
"rowserrcheck",
"rwa",
"sarama",
"SCRIPTDIR",
"shellcheck",
"SHELLOPTS",
"shfmt",
"sidechannel",
"sloglint",
"SLXVDP",
"softprops",
"spancheck",
"spew",
"Sprintf",
"sqlclosecheck",
"sqlx",
"staticcheck",
"stderr",
"stdout",
"strconcat",
"strconv",
"stretchr",
"stretchside",
"strs",
"Submatch",
"swaggo",
"syspkg",
"tagalign",
"tagliatelle",
"testableexamples",
"testcontainers",
"testdata",
"testifylint",
"testpackage",
"thelper",
"toolchain",
"tparallel",
"Typeflag",
"unconvert",
"underef",
"unparam",
"unparsedflags",
"untar",
"usestdlibvars",
"usetesting",
"usr",
"varnamelen",
"wastedassign",
"wayou",
"whitespace",
"wrapcheck",
"xdot",
"yamllint",
"zerolog",
"zerologlint"
],
"ignorePaths": [
"dist",
".git"
]
}

View file

@ -1,10 +1,9 @@
{
"recommendations": [
"golang.go", // Official Go extension
"trunk.io", // trunk.io Linters
"trunk.io", // trunk.io Linters
"wayou.vscode-todo-highlight", // Highlight TODOs
"streetsidesoftware.code-spell-checker", // Spell checking
"eamodio.gitlens", // Git integration
"github.vscode-github-actions", // GitHub Actions support
"github.vscode-github-actions" // GitHub Actions support
]
}
}

8
.vscode/launch.json vendored
View file

@ -1,12 +1,12 @@
{
{
"version": "0.2.0",
"configurations": [
"configurations": [
{
"name": "Launch Package",
"type": "go",
"request": "launch",
"mode": "auto",
"program": "${fileDirname}",
"program": "${fileDirname}"
}
]
}
}

51
.vscode/settings.json vendored
View file

@ -1,28 +1,41 @@
{
"cSpell.enabled": false,
"editor.rulers": [
100
],
"editor.tabSize": 2,
"editor.insertSpaces": true,
"editor.detectIndentation": false,
"editor.rulers": [100],
"editor.formatOnSave": true,
"editor.formatOnPaste": true,
"editor.formatOnType": true,
"editor.wordWrap": "wordWrapColumn",
"editor.wordWrapColumn": 100,
"editor.wrappingIndent": "indent",
"[go]": {
"editor.defaultFormatter": "trunk.io",
"editor.formatOnSave": true
"editor.defaultFormatter": "trunk.io"
},
"[json]": {
"editor.defaultFormatter": "vscode.json-language-features",
"editor.formatOnSave": true
"[shellscript]": {
"editor.defaultFormatter": "trunk.io"
},
"[jsonc]": {
"editor.defaultFormatter": "vscode.json-language-features",
"editor.formatOnSave": true
"[bash]": {
"editor.defaultFormatter": "trunk.io"
},
"[shell]": {
"editor.defaultFormatter": "trunk.io",
"editor.formatOnSave": true
"[json,jsonc]": {
"editor.defaultFormatter": "vscode.json-language-features"
},
"[shfmt]": {
"editor.defaultFormatter": "trunk.io"
},
"[yaml]": {
"editor.defaultFormatter": "trunk.io",
"editor.formatOnSave": true
"editor.defaultFormatter": "trunk.io"
},
"editor.formatOnSave": true
}
"cSpell.enabled": false,
"editor.codeActionsOnSave": {
"source.fixAll.shellcheck": "explicit"
},
"workbench.editorAssociations": {
"git-index": "default",
"git-show": "default"
},
"files.readonlyInclude": {},
"workbench.editor.defaultBinaryEditor": "default",
"workbench.editor.enablePreviewFromCodeNavigation": false
}

26
.vscode/tasks.json vendored
View file

@ -5,18 +5,12 @@
"label": "go: build",
"type": "shell",
"command": "go",
"args": [
"build",
"-v",
"./..."
],
"args": ["build", "-v", "./..."],
"group": {
"kind": "build",
"isDefault": true
},
"problemMatcher": [
"$go"
],
"problemMatcher": ["$go"],
"presentation": {
"reveal": "always",
"panel": "shared",
@ -28,18 +22,12 @@
"label": "go: test",
"type": "shell",
"command": "go",
"args": [
"test",
"-v",
"./..."
],
"args": ["test", "-v", "./..."],
"group": {
"kind": "test",
"isDefault": true
},
"problemMatcher": [
"$go"
],
"problemMatcher": ["$go"],
"presentation": {
"reveal": "always",
"panel": "shared",
@ -59,9 +47,7 @@
"-covermode=atomic",
"./..."
],
"problemMatcher": [
"$go"
],
"problemMatcher": ["$go"],
"presentation": {
"reveal": "always",
"panel": "shared",
@ -70,4 +56,4 @@
}
}
]
}
}

693
CLAUDE.md
View file

@ -10,6 +10,7 @@
- [3. Error Handling](#3-error-handling)
- [4. API Design](#4-api-design)
- [5. Documentation Practices](#5-documentation-practices)
- [Go Code Documentation Standards](#go-code-documentation-standards)
- [Code Documentation](#code-documentation)
- [Project Documentation](#project-documentation)
- [6. Testing Strategy](#6-testing-strategy)
@ -41,6 +42,10 @@
- [Further Guidance](#further-guidance)
- [Bash Scripts](#bash-scripts)
- [Script Testing](#script-testing)
- [Test Framework Architecture Pattern](#test-framework-architecture-pattern)
- [Script Argument Parsing Pattern](#script-argument-parsing-pattern)
- [Centralized Configuration Management](#centralized-configuration-management)
- [Implementation Status](#implementation-status)
- [Testing Principles](#testing-principles)
- [1. Test Organization Strategy](#1-test-organization-strategy)
- [2. Code Structure](#2-code-structure)
@ -69,6 +74,7 @@
### 2. Code Style and Formatting
- Use 2 spaces for indentation, never tabs
- Consistent naming conventions (e.g., CamelCase for exported names)
- Keep functions small and focused
- Use meaningful variable names
@ -85,15 +91,92 @@
### 4. API Design
- Make zero values useful
- Keep interfaces small and focused, observing the [single responsibility principle](https://en.wikipedia.org/wiki/Single-responsibility_principle)
- Observe the [open-closed principle](https://en.wikipedia.org/wiki/Open%E2%80%93closed_principle) so that it is open for extension but closed to modification
- Observe the [dependency inversion principle](https://en.wikipedia.org/wiki/Dependency_inversion_principle) to keep interfaces loosely coupled
- Keep interfaces small and focused, observing the
[single responsibility principle](https://en.wikipedia.org/wiki/Single-responsibility_principle)
- Observe the
[open-closed principle](https://en.wikipedia.org/wiki/Open%E2%80%93closed_principle)
so that it is open for extension but closed to modification
- Observe the
[dependency inversion principle](https://en.wikipedia.org/wiki/Dependency_inversion_principle)
to keep interfaces loosely coupled
- Design for composition over inheritance
- Use option patterns for complex configurations
- Make dependencies explicit
### 5. Documentation Practices
#### Go Code Documentation Standards
Following the official [Go Documentation Guidelines](https://go.dev/blog/godoc):
1. **Package Documentation**
- Every package must have a doc comment immediately before the `package`
statement
- Format: `// Package xyz ...` (first sentence) followed by detailed
description
- First sentence should be a summary beginning with `Package xyz`
- Follow with a blank line and detailed documentation
- Include package-level examples if helpful
2. **Exported Items Documentation**
- Document all exported (capitalized) names
- Comments must begin with the name being declared
- First sentence should be a summary
- Omit the subject when it's the thing being documented
- Use article "a" for types that could be one of many, "the" for singletons
Examples:
```go
// List represents a singly-linked list.
// A zero List is valid and represents an empty list.
type List struct {}
// NewRing creates a new ring buffer with the given size.
func NewRing(size int) *Ring {}
// Append adds the elements to the list.
// Blocks if buffer is full.
func (l *List) Append(elems ...interface{}) {}
```
3. **Documentation Style**
- Write clear, complete sentences
- Begin comments with a capital letter
- End sentences with punctuation
- Keep comments up to date with code changes
- Focus on behavior users can rely on, not implementation
- Document synchronization assumptions for concurrent access
- Document any special error conditions or panics
4. **Examples**
- Add examples for complex types or functions using `Example` functions
- Include examples in package docs for important usage patterns
- Make examples self-contained and runnable
- Use realistic data and common use cases
- Show output in comments when examples print output:
```go
func ExampleHello() {
fmt.Println("Hello")
// Output: Hello
}
```
5. **Doc Comments Format**
- Use complete sentences and proper punctuation
- Add a blank line between paragraphs
- Use lists and code snippets for clarity
- Include links to related functions/types where helpful
- Document parameters and return values implicitly in the description
- Break long lines at 80 characters
6. **Quality Control**
- Run `go doc` to verify how documentation will appear
- Review documentation during code reviews
- Keep examples up to date and passing
- Update docs when changing behavior
#### Code Documentation
- Write package documentation with examples
@ -105,17 +188,23 @@
Example:
```go
// Package cache provides a caching mechanism for apt packages.
// It supports both saving and restoring package states, making it
// useful for CI/CD environments where package installation is expensive.
// key.go
//
// Description:
//
// Provides types and functions for managing cache keys, including serialization, deserialization,
// and validation of package metadata.
//
// Package: cache
//
// Example usage:
//
// cache := NewCache()
// err := cache.SavePackages(packages)
// if err != nil {
// // handle error
// }
// // Create a new cache key
// key := cache.NewKey(packages, "v1.0", "v2", "amd64")
//
// // Get the hash of the key
// hash := key.Hash()
// fmt.Printf("Key hash: %x\n", hash)
package cache
```
@ -202,7 +291,7 @@ func main() {
defer f.Close()
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
// Your code here
}
```
@ -261,7 +350,7 @@ func main() {
defer f.Close()
trace.Start(f)
defer trace.Stop()
// Your code here
}
```
@ -372,22 +461,27 @@ go tool pprof -http=:8080 cpu.prof
- Minimize the amount of shell code and put complex logic in the Go code
- Use clear step `id` names that use dashes between words and active verbs
- Avoid hard-coded API URLs like https://api.github.com. Use environment variables (GITHUB_API_URL for REST API, GITHUB_GRAPHQL_URL for GraphQL) or the @actions/github toolkit for dynamic URL handling
- Avoid hard-coded API URLs like <https://api.github.com>. Use environment
variables (GITHUB_API_URL for REST API, GITHUB_GRAPHQL_URL for GraphQL) or the
@actions/github toolkit for dynamic URL handling
##### Release Management
- Use semantic versioning for releases (e.g., v1.0.0)
- Recommend users reference major version tags (v1) instead of the default branch for stability.
- Recommend users reference major version tags (v1) instead of the default
branch for stability.
- Update major version tags to point to the latest release
##### Create a README File
Include a detailed description, required/optional inputs and outputs, secrets, environment variables, and usage examples
Include a detailed description, required/optional inputs and outputs, secrets,
environment variables, and usage examples
##### Testing and Automation
- Add workflows to test your action on feature branches and pull requests
- Automate releases using workflows triggered by publishing or editing a release.
- Automate releases using workflows triggered by publishing or editing a
release.
##### Community Engagement
@ -398,24 +492,26 @@ Include a detailed description, required/optional inputs and outputs, secrets, e
##### Further Guidance
For more details, visit:
- https://docs.github.com/en/actions/how-tos/create-and-publish-actions/manage-custom-actions
- https://docs.github.com/en/actions/how-tos/create-and-publish-actions/release-and-maintain-actions
- <https://docs.github.com/en/actions/how-tos/create-and-publish-actions/manage-custom-actions>
- <https://docs.github.com/en/actions/how-tos/create-and-publish-actions/release-and-maintain-actions>
### Bash Scripts
Project scripts should follow these guidelines:
- Create scripts in the `scripts` directory (not `tools`)
- Add new functionality to the `scripts/menu.sh` script for easy access
- Follow formatting rules in
[Shellcheck](https://github.com/koalaman/shellcheck/wiki)
- Follow style guide rules in
[Google Bash Style Guide](https://google.github.io/styleguide/shellguide)
- Include proper error handling and exit codes
- Use `scripts/lib.sh` whenever possible for common functionality
- Use imperative verb form for script names:
- Good: `export_version.sh`, `build_package.sh`, `run_tests.sh`
- Bad: `version_export.sh`, `package_builder.sh`, `test_runner.sh`
- Follow consistent naming conventions:
- Use lowercase with underscores
- Start with a verb in imperative form
- Use clear, descriptive names
- Create scripts in the `scripts` directory (not `tools`)
- Make scripts executable (`chmod +x`)
- Include proper error handling and exit codes
- Add new functionality to the `scripts/menu.sh` script for easy access
- Add usage information (viewable with `-h` or `--help`)
Script Header Format:
@ -424,7 +520,7 @@ Script Header Format:
#==============================================================================
# script_name.sh
#==============================================================================
#
#
# DESCRIPTION:
# Brief description of what the script does.
# Additional details if needed.
@ -440,7 +536,8 @@ Script Header Format:
#==============================================================================
```
Every script should include this header format at the top, with all sections filled out appropriately. The header provides:
Every script should include this header format at the top, with all sections
filled out appropriately. The header provides:
- Clear identification of the script
- Description of its purpose and functionality
@ -450,7 +547,8 @@ Every script should include this header format at the top, with all sections fil
#### Script Testing
All scripts must have corresponding tests in the `scripts/tests` directory using the common test library:
All scripts must have corresponding tests in the `scripts/tests` directory using
the common test library:
1. **Test File Structure**
- Name test files as `<script_name>_test.sh`
@ -458,215 +556,279 @@ All scripts must have corresponding tests in the `scripts/tests` directory using
- Make test files executable (`chmod +x`)
- Source the common test library (`test_lib.sh`)
2. **Common Test Library**
The `test_lib.sh` library provides a standard test framework:
```bash
# Source the test library
source "$(dirname "$0")/test_lib.sh"
# Library provides:
- Color output (GREEN, RED, BLUE, NC, BOLD)
- Test counting (PASS, FAIL)
- Temporary directory management
- Standard argument parsing
- Common test functions
```
Key Functions:
- `test_case "name" "command" "expected_output" "should_succeed"`
- `print_header "text"` - Print bold header
- `print_section "text"` - Print section header in blue
- `print_info "text"` - Print verbose info
- `setup_test_env` - Create temp directory
- `cleanup_test_env` - Clean up resources
- `create_test_file "path" "content" "mode"` - Create test file
- `is_command_available "cmd"` - Check if command exists
- `wait_for_condition "cmd" timeout interval` - Wait for condition
- `report_results` - Print test summary
2. **Common Test Library** The `test_lib.sh` library provides a standard test
framework. See the `scripts/tests/template_test.sh` for examples of how to
set up one.
3. **Test Organization**
- Group related test cases into sections
- Test each command/flag combination
- Test error conditions explicitly
- Include setup and teardown if needed
- Use temporary directories for file operations
- Clean up resources in trap handlers
4. **Test Coverage**
- Test main functionality
- Test error conditions
- Test input validation
- Test edge cases
- Test each supported flag/option
Standard test framework:
```bash
#!/bin/bash
# Colors for test output
GREEN='\033[0;32m'
RED='\033[0;31m'
NC='\033[0m' # No Color
# Test counters
PASS=0
FAIL=0
# Main test case function
function test_case() {
local name=$1
local cmd=$2
local expected_output=$3
local should_succeed=${4:-true}
echo -n "Testing $name... "
# Run command and capture output
local output
if [[ $should_succeed == "true" ]]; then
output=$($cmd 2>&1)
local status=$?
if [[ $status -eq 0 && $output == *"$expected_output"* ]]; then
echo -e "${GREEN}PASS${NC}"
((PASS++))
return 0
fi
else
output=$($cmd 2>&1) || true
if [[ $output == *"$expected_output"* ]]; then
echo -e "${GREEN}PASS${NC}"
((PASS++))
return 0
fi
fi
echo -e "${RED}FAIL${NC}"
echo " Expected output to contain: '$expected_output'"
echo " Got: '$output'"
((FAIL++))
return 0
}
# Create a temporary directory for test files
TEMP_DIR=$(mktemp -d)
trap 'rm -rf "$TEMP_DIR"' EXIT
# Test sections should be organized like this:
echo "Running script_name.sh tests..."
echo "------------------------------"
# Section 1: Input Validation
test_case "no arguments provided" \
"./script_name.sh" \
"error: arguments required" \
false
# Section 2: Main Functionality
test_case "basic operation" \
"./script_name.sh arg1" \
"success" \
true
# Report results
echo
echo "Test Results:"
echo "Passed: $PASS"
echo "Failed: $FAIL"
exit $FAIL
```
Example test file structure:
```bash
#!/bin/bash
# Test script for example_script.sh
set -e
# Get the directory containing this script
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
PROJECT_ROOT="$(dirname "$(dirname "$SCRIPT_DIR")")"
# Create a temporary directory for test files
TEMP_DIR=$(mktemp -d)
trap 'rm -rf "$TEMP_DIR"' EXIT
# Test helper functions
setup_test_env() {
# Setup code here
}
teardown_test_env() {
# Cleanup code here
}
# Individual test cases
test_main_functionality() {
echo "Testing main functionality..."
# Test code here
}
test_error_handling() {
echo "Testing error handling..."
# Test code here
}
# Run all tests
echo "Running example_script.sh tests..."
setup_test_env
test_main_functionality
test_error_handling
teardown_test_env
echo "All tests passed!"
```
1. **CI Integration**
5. **CI Integration**
- Tests run automatically in CI
- Tests must pass before merge
- Test execution is part of the validate-scripts job
- Test failures block PR merges
Example script structure:
##### Test Framework Architecture Pattern
The improved test framework follows this standardized pattern for all script
tests:
**Test File Template:**
```bash
#!/bin/bash
# Script Name: example_script.sh
# Description: Brief description of what the script does
# Usage: ./example_script.sh [options] <arguments>
# Author: Your Name
# Date: YYYY-MM-DD
#==============================================================================
# script_name_test.sh
#==============================================================================
#
# DESCRIPTION:
# Test suite for script_name.sh functionality.
# Brief description of what aspects are tested.
#
# USAGE:
# script_name_test.sh [OPTIONS]
#
# OPTIONS:
# -v, --verbose Enable verbose test output
# --stop-on-failure Stop on first test failure
# -h, --help Show this help message
#
#==============================================================================
set -e # Exit on error
# Set up the script path we want to test
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
export SCRIPT_PATH="$SCRIPT_DIR/../script_name.sh"
# Help function
show_help() {
cat << EOF
Usage: $(basename "$0") [options] <arguments>
# Source the test framework
source "$SCRIPT_DIR/test_lib.sh"
Options:
-h, --help Show this help message
-v, --version Show version information
# Define test functions
run_tests() {
test_section "Help and Usage"
Arguments:
<input> Description of input argument
EOF
test_case "shows help message" \
"--help" \
"USAGE:" \
true
test_case "shows error for invalid option" \
"--invalid-option" \
"Unknown option" \
false
test_section "Core Functionality"
# Add more test cases here
}
# Parse arguments
while [[ $# -gt 0 ]]; do
# Start the test framework and run tests
start_tests "$@"
run_tests
```
**Key Framework Features:**
- **SCRIPT_PATH Setup**: Test files must set `SCRIPT_PATH` before sourcing
`test_lib.sh` to avoid variable conflicts
- **Function-based Test Organization**: Tests are organized in a `run_tests()`
function called after framework initialization
- **Consistent Test Sections**: Use `test_section` to group related tests with
descriptive headers
- **Standard Test Case Pattern**:
`test_case "name" "args" "expected_output" "should_succeed"`
- **Framework Integration**: Call `start_tests "$@"` before running tests to
handle argument parsing and setup
##### Script Argument Parsing Pattern
All scripts should implement consistent argument parsing following this pattern:
```bash
main() {
# Parse command line arguments first
while [[ $# -gt 0 ]]; do
case $1 in
-h|--help)
show_help
exit 0
;;
*)
# Handle other arguments
;;
-v | --verbose)
export VERBOSE=true
;;
-h | --help)
cat << 'EOF'
USAGE:
script_name.sh [OPTIONS]
DESCRIPTION:
Brief description of what the script does.
Additional details if needed.
OPTIONS:
-v, --verbose Enable verbose output
-h, --help Show this help message
EOF
exit 0
;;
*)
echo "Unknown option: $1" >&2
echo "Use --help for usage information." >&2
exit 1
;;
esac
shift
done
done
# Main script logic here
# Script main logic here
}
main "$@"
```
**Key Argument Parsing Features:**
- **Consistent Options**: All scripts support `-v/--verbose` and `-h/--help`
- **Early Help Exit**: Help is displayed immediately without running script
logic
- **Error Handling**: Unknown options produce helpful error messages
- **Inline Help Text**: Help is embedded in the script using heredoc syntax
##### Centralized Configuration Management
The project implements centralized version management using the `.env` file as a
single source of truth:
**Configuration Structure:**
```bash
# .env file contents
GO_VERSION=1.23.5
GO_TOOLCHAIN=go1.23.5
```
**GitHub Actions Integration:**
```yaml
# .github/workflows/ci.yml pattern
jobs:
setup:
runs-on: ubuntu-latest
outputs:
go-version: ${{ steps.env.outputs.go-version }}
steps:
- uses: actions/checkout@v4
- id: env
run: |
source .env
echo "go-version=$GO_VERSION" >> $GITHUB_OUTPUT
dependent-job:
needs: setup
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v5
with:
go-version: ${{ needs.setup.outputs.go-version }}
```
**Synchronization Script Pattern:**
- `scripts/sync_go_version.sh` reads `.env` and updates `go.mod` accordingly
- Ensures consistency between environment configuration and Go module
requirements
- Can be extended for other configuration synchronization needs
##### Implementation Status
**✅ Implemented Patterns:**
The following scripts have been updated with the standardized patterns:
1. **scripts/export_version.sh** - Complete implementation:
- ✅ Argument parsing with `--help` and `--verbose`
- ✅ Proper error handling and logging
- ✅ Comprehensive test suite in `scripts/tests/export_version_test.sh`
- ✅ Function-based test organization
2. **scripts/setup_dev.sh** - Complete implementation:
- ✅ Argument parsing with `--help` and `--verbose`
- ✅ Script-specific help documentation
- ✅ Error handling for unknown options
- ✅ Comprehensive test suite in `scripts/tests/setup_dev_test.sh`
- ✅ Function-based test organization
3. **scripts/tests/test_lib.sh** - Framework improvements:
- ✅ Reliable library loading with fallback paths
- ✅ Safe SCRIPT_PATH variable handling
- ✅ Arithmetic operations compatible with `set -e`
- ✅ Proper script name detection
- ✅ Lazy temporary directory initialization
- ✅ Comprehensive documentation and architecture notes
4. **Centralized Configuration Management**:
- ✅ `.env` file as single source of truth for versions
- ✅ GitHub Actions CI integration with version propagation
- ✅ `scripts/sync_go_version.sh` for configuration synchronization
**🔄 Remaining Scripts to Update:**
These scripts need the same pattern implementations:
- `scripts/distribute.sh` - Needs argument parsing and testing
- `scripts/update_md_tocs.sh` - Needs argument parsing and testing
- `scripts/check_and_fix_env.sh` - Needs argument parsing and testing
- `scripts/template.sh` - Needs argument parsing and testing
- `scripts/menu.sh` - Needs argument parsing and testing
Example script structure:
```bash
#!/bin/bash
#==============================================================================
# fix_and_update.sh
#==============================================================================
#
# DESCRIPTION:
# Runs lint fixes and checks for UTF-8 formatting issues in the project.
# Intended to help maintain code quality and formatting consistency.
#
# USAGE:
# ./scripts/fix_and_update.sh
#
# OPTIONS:
# -h, --help Show this help message
#
# DEPENDENCIES:
# - trunk (for linting)
# - bash
# - ./scripts/check_utf8.sh
#==============================================================================
# Resolves to absolute path and loads library
source "$(cd "$(dirname "$0")" && pwd)/lib.sh"
main_menu() {
if false; then
show_help # Uses the script header to output usage message
fi
}
# ...
# Script logic, variables and functions.
# ...
# Parse common command line arguments and hand the remaining to the script
remaining_args=$(parse_common_args "$@")
# Run main menu
main_menu
```
## Testing Principles
@ -676,7 +838,8 @@ done
We established a balanced approach to test organization:
- Use table-driven tests for simple, repetitive cases without introducing logic
- Use individual test functions for cases that require specific Arrange, Act, Assert steps that cannot be shared amongst other cases
- Use individual test functions for cases that require specific Arrange, Act,
Assert steps that cannot be shared amongst other cases
- Group related test cases that operate on the same API method / function
### 2. Code Structure
@ -684,7 +847,6 @@ We established a balanced approach to test organization:
#### Constants and Variables
```go
const (
manifestVersion = "1.0.0"
manifestGlobalVer = "v2"
@ -696,7 +858,8 @@ var (
)
```
- Define constants for fixed values where the presence and format is only needed and the value content itself does not affect the behavior under test
- Define constants for fixed values where the presence and format is only
needed and the value content itself does not affect the behavior under test
- Use variables for reusable test data
- Group related constants and variables together
- Do not prefix constants or variables with `test`
@ -717,7 +880,8 @@ func assertValidJSON(t *testing.T, data string) {
}
```
Example of using functions to abstract away details not relevant to the behavior under test
Example of using functions to abstract away details not relevant to the behavior
under test
```go
type Item struct {
@ -737,7 +901,7 @@ func TestItem_Description(t *testing.T) {
}
AddPrefixToDescription(&item, "prefix: ")
if item.Description != "prefix: original description" {
t.Errorf("got %q, want %q", item.Description, "prefix: original description")
}
@ -745,7 +909,7 @@ func TestItem_Description(t *testing.T) {
// GOOD: Clear focus, reusable arrangement, proper assertions
const (
defaultName = "test item"
defaultName = "test item"
defaultVersion = "1.0.0"
defaultTimeStr = "2025-01-01T00:00:00Z"
)
@ -756,7 +920,7 @@ func createTestItem(t *testing.T, description string) *Item {
if err != nil {
t.Fatalf("failed to parse default time: %v", err)
}
return &Item{
Name: defaultName,
Description: description,
@ -778,10 +942,12 @@ func TestAddPrefixToDescription_WithValidInput_AddsPrefix(t *testing.T) {
}
```
- Create helper functions to reduce duplication and keeps tests focused on the arrangement inputs and how they correspond to the expected output
- Create helper functions to reduce duplication and keeps tests focused on the
arrangement inputs and how they correspond to the expected output
- Use `t.Helper()` for proper test failure reporting
- Keep helpers focused and single-purpose
- Helper functions that require logic should go into their own file and have tests
- Helper functions that require logic should go into their own file and have
tests
### 3. Test Case Patterns
@ -792,10 +958,10 @@ func TestAddPrefixToDescription_WithValidInput_AddsPrefix(t *testing.T) {
func TestFormatMessage_WithEmptyString_ReturnsError(t *testing.T) {
// Arrange
input := ""
// Act
actual, err := FormatMessage(input)
// Assert
assertFormatError(t, actual, err, "input cannot be empty")
}
@ -804,10 +970,10 @@ func TestFormatMessage_WithValidInput_ReturnsUpperCase(t *testing.T) {
// Arrange
input := "test message"
expected := "TEST MESSAGE"
// Act
actual, err := FormatMessage(input)
// Assert
assertFormatSuccess(t, actual, err, expected)
}
@ -816,10 +982,10 @@ func TestFormatMessage_WithMultipleSpaces_PreservesSpacing(t *testing.T) {
// Arrange
input := "hello world"
expected := "HELLO WORLD"
// Act
actual, err := FormatMessage(input)
// Assert
assertFormatSuccess(t, actual, err, expected)
}
@ -846,7 +1012,7 @@ func TestProcessTransaction_WithConcurrentUpdates_PreservesConsistency(t *testin
// Arrange
store := NewTestStore(t)
defer store.Close()
const accountID = "test-account"
initialBalance := decimal.NewFromInt(1000)
arrangeErr := arrangeTestAccount(t, store, accountID, initialBalance)
@ -854,7 +1020,7 @@ func TestProcessTransaction_WithConcurrentUpdates_PreservesConsistency(t *testin
// Act
actualBalance, err := executeConcurrentTransactions(t, store, accountID)
// Assert
expected := initialBalance.Add(decimal.NewFromInt(100)) // 100 transactions of 1 unit each
assertBalanceEquals(t, expected, actualBalance)
@ -871,7 +1037,7 @@ func executeConcurrentTransactions(t *testing.T, store *Store, accountID string)
const numTransactions = 100
var wg sync.WaitGroup
wg.Add(numTransactions)
for i := 0; i < numTransactions; i++ {
go func() {
defer wg.Done()
@ -881,13 +1047,13 @@ func executeConcurrentTransactions(t *testing.T, store *Store, accountID string)
}()
}
wg.Wait()
return store.GetBalance(accountID)
}
func assertBalanceEquals(t *testing.T, expected, actual decimal.Decimal) {
t.Helper()
assert.True(t, expected.Equal(actual),
assert.True(t, expected.Equal(actual),
"balance should be %s, actual was %s", expected, actual)
}
```
@ -895,16 +1061,57 @@ func assertBalanceEquals(t *testing.T, expected, actual decimal.Decimal) {
### 4. Best Practices Applied
1. **Clear Naming**
- Use descriptive test names
- Use test name formats
- `Test<function>_<arrangement>_<expectation>` for free functions, and
- `Test<interface><function>_<arrangement>_<expectation>` for interface functions.
- Name test data clearly and meaningfully
- Name by abstraction, not implementation
- Use `expected` for expected values
- Use `actual` for function results
- Keep test variables consistent across all tests
- Always use "Arrange", "Act", "Assert" as step comments in tests
- Use descriptive test name arrangement and expectation parts
- Use test name formats in a 3 part structure
- `Test<function>_<arrangement>_<expectation>` for free functions, and
- `Test<interface><function>_<arrangement>_<expectation>` for interface
functions.
- The module name is inferred
- Treat the first part as either the type function or the free function
under test
```go
func Test<[type]<function>>_<arrangement>_<expectation>(t *testing.T) {
// Test body
}
```
```go
// Implementation
type Logger struct {
debug bool
}
var logger = Logger{debug: false}
func (l *Logger) Log(msg string) {
// ...
}
func SetDebug(v bool) {
logger.debug = v
}
```
```go
// Test
func TestLoggerLog_EmptyMessage_NothingLogged(t *testing.T) {
// Test body
}
func TestSetDebug_PassFalseValue_DebugMessageNotLogged(t *testing.T) {
// Test body
}
```
2. **Test Structure**
- Keep test body simple and linear
@ -921,7 +1128,6 @@ func assertBalanceEquals(t *testing.T, expected, actual decimal.Decimal) {
4. **Test Data Management**
- Centralize test data definitions
- Use `<function>_<arrangement>_<artifact>` naming
- Use constants for fixed values
- Abstract complex data arrangement into helpers
@ -942,7 +1148,7 @@ func assertBalanceEquals(t *testing.T, expected, actual decimal.Decimal) {
#### Before
```go
func TestFeature(t *testing.T) {
func TestFeature_MixedArrangements_ExpectAlotOfDifferentThings(t *testing.T) {
// Mixed arrangement and assertions
// Duplicated code
// Magic values
@ -953,17 +1159,17 @@ func TestFeature(t *testing.T) {
```go
// Before: Mixed concerns, unclear naming, magic values
func TestValidateConfig(t *testing.T) {
func TestValidateConfig_MissingFileAndEmptyPaths_ValidationFails(t *testing.T) {
c := &Config{
Path: "./testdata",
Port: 8080,
MaxRetries: 3,
}
if err := c.Validate(); err != nil {
t.Error("validation failed")
}
c.Path = ""
if err := c.Validate(); err == nil {
t.Error("expected error for empty path")
@ -972,7 +1178,7 @@ func TestValidateConfig(t *testing.T) {
// After: Clear structure, meaningful constants, proper test naming
const (
testConfigPath = "./testdata"
testConfigPath = "./testdata"
defaultPort = 8080
defaultMaxRetries = 3
)
@ -984,10 +1190,10 @@ func TestValidateConfig_WithValidInputs_Succeeds(t *testing.T) {
Port: defaultPort,
MaxRetries: defaultMaxRetries,
}
// Act
err := config.Validate()
// Assert
assert.NoError(t, err, "valid config should pass validation")
}
@ -999,10 +1205,10 @@ func TestValidateConfig_WithEmptyPath_ReturnsError(t *testing.T) {
Port: defaultPort,
MaxRetries: defaultMaxRetries,
}
// Act
err := config.Validate()
// Assert
assert.Error(t, err)
assert.Contains(t, err.Error(), "path cannot be empty")
@ -1040,4 +1246,5 @@ These improvements make the test code:
- More reliable
- More efficient to extend
The patterns and principles can be applied across different types of tests to create a consistent and effective testing strategy.
The patterns and principles can be applied across different types of tests to
create a consistent and effective testing strategy.

View file

@ -95,6 +95,7 @@ cache-apt-pkgs validate [packages]
```
#### Examples
```bash
# Validate package names and versions
cache-apt-pkgs validate python3-dev=3.9.5-3 cmake=3.18.4-2

View file

@ -22,7 +22,7 @@ Thank you for your interest in contributing to cache-apt-pkgs-action! This docum
### 📋 Prerequisites
1. 🔵 [Go 1.23 or later](https://golang.org/dl/)
1. 🔵 [Go 1.23.5 or later](https://golang.org/dl/)
2. 💻 [Visual Studio Code](https://code.visualstudio.com/) (recommended)
3. 📂 [Git](https://git-scm.com/downloads)
@ -40,7 +40,7 @@ Thank you for your interest in contributing to cache-apt-pkgs-action! This docum
```bash
# Interactive menu for all development tasks
./scripts/menu.sh
# Or use individual scripts directly:
./scripts/setup_dev.sh # Set up development environment
./scripts/update_md_tocs.sh # Update table of contents in markdown files
@ -65,9 +65,9 @@ The project includes several utility scripts to help with development:
To access the menu system, run:
```bash
./scripts/menu.sh
```
```bash
./scripts/menu.sh
```
This will present an interactive menu with all available development tasks.
@ -102,9 +102,9 @@ There are two ways to test the GitHub Action workflows:
- 🐧 Non-WSL users (native Linux)
```bash
curl -fsSL https://get.docker.com -o get-docker.sh &&
sudo sh get-docker.sh &&
sudo usermod -aG docker $USER &&
curl -fsSL https://get.docker.com -o get-docker.sh &&
sudo sh get-docker.sh &&
sudo usermod -aG docker $USER &&
sudo systemctl start docker
```
@ -116,7 +116,7 @@ There are two ways to test the GitHub Action workflows:
act push # Run push event workflows
act pull_request # Run PR workflows
act workflow_dispatch -i ref=dev-v2.0 -i debug=true # Manual trigger workflow
```
```
## 📝 Making Changes
@ -171,7 +171,7 @@ There are two ways to test the GitHub Action workflows:
act push # Run push event workflows
act pull_request # Run PR workflows
act workflow_dispatch -i ref=dev-v2.0 -i debug=true # Manual trigger workflow
```
```
## Making Changes
@ -266,7 +266,6 @@ To make the library available on [pkg.go.dev](https://pkg.go.dev):
```
2. 🔄 Trigger pkg.go.dev to fetch your module:
- Visit [pkg.go.dev for this module](https://pkg.go.dev/github.com/awalsh128/cache-apt-pkgs-action@v2.0.0)
- Or fetch via command line:
@ -275,10 +274,9 @@ To make the library available on [pkg.go.dev](https://pkg.go.dev):
```
3. 📝 Best practices for publishing:
- Add comprehensive godoc comments
- Include examples in your documentation
- Use semantic versioning for tags
- Keep the module path consistent
- Update go.mod with the correct module path
- [Go Best Practices](https://golang.org/doc/effective_go#names)
- [Go Best Practices](https://golang.org/doc/effective_go#names)

View file

@ -9,39 +9,42 @@
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [🚀 Quick Start](#-quick-start)
- [✨ Features](#-features)
- [📋 Requirements](#-requirements)
- [🔧 Configuration](#-configuration)
- [Inputs](#inputs)
- [Outputs](#outputs)
- [📝 Usage Guide](#-usage-guide)
- [Version Selection](#version-selection)
- [Basic Example](#basic-example)
- [Advanced Example](#advanced-example)
- [🔍 Cache Details](#-cache-details)
- [Cache Scoping](#cache-scoping)
- [Cache Keys](#cache-keys)
- [Cache Invalidation](#cache-invalidation)
- [🚨 Common Issues](#-common-issues)
- [Permission Issues](#permission-issues)
- [Missing Dependencies](#missing-dependencies)
- [Cache Misses](#cache-misses)
- [🤝 Contributing](#-contributing)
- [📜 License](#-license)
- [🔄 Updates and Maintenance](#-updates-and-maintenance)
- [🌟 Acknowledgements](#-acknowledgements)
- [Getting Started](#getting-started)
- [Workflow Setup](#workflow-setup)
- [Detailed Configuration](#detailed-configuration)
- [Cache scopes](#cache-scopes)
- [Example workflows](#example-workflows)
- [Build and Deploy Doxygen Documentation](#build-and-deploy-doxygen-documentation)
- [Simple Package Installation](#simple-package-installation)
- [Caveats](#caveats)
- [Edge Cases](#edge-cases)
- [Non-file Dependencies](#non-file-dependencies)
- [Cache Limits](#cache-limits)
- [Cache APT Packages Action](#cache-apt-packages-action)
- [🚀 Quick Start](#-quick-start)
- [✨ Features](#-features)
- [📋 Requirements](#-requirements)
- [🔧 Configuration](#-configuration)
- [Inputs](#inputs)
- [Outputs](#outputs)
- [📝 Usage Guide](#-usage-guide)
- [Version Selection](#version-selection)
- [Basic Example](#basic-example)
- [Advanced Example](#advanced-example)
- [🔍 Cache Details](#-cache-details)
- [Cache Scoping](#cache-scoping)
- [Cache Keys](#cache-keys)
- [Cache Invalidation](#cache-invalidation)
- [🚨 Common Issues](#-common-issues)
- [Permission Issues](#permission-issues)
- [Missing Dependencies](#missing-dependencies)
- [Cache Misses](#cache-misses)
- [🤝 Contributing](#-contributing)
- [📜 License](#-license)
- [🔄 Updates and Maintenance](#-updates-and-maintenance)
- [🌟 Acknowledgements](#-acknowledgements)
- [Getting Started](#getting-started)
- [Workflow Setup](#workflow-setup)
- [Detailed Configuration](#detailed-configuration)
- [Input Parameters](#input-parameters)
- [Output Values](#output-values)
- [Cache scopes](#cache-scopes)
- [Example workflows](#example-workflows)
- [Build and Deploy Doxygen Documentation](#build-and-deploy-doxygen-documentation)
- [Simple Package Installation](#simple-package-installation)
- [Caveats](#caveats)
- [Edge Cases](#edge-cases)
- [Non-file Dependencies](#non-file-dependencies)
- [Cache Limits](#cache-limits)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
@ -115,13 +118,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Cache APT Packages
uses: awalsh128/cache-apt-pkgs-action@v2
with:
packages: python3-dev cmake
version: 1.0
- name: Build Project
run: |
cmake .
@ -139,7 +142,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Cache APT Packages
uses: awalsh128/cache-apt-pkgs-action@v2
id: apt-cache
@ -147,7 +150,7 @@ jobs:
packages: python3-dev cmake libboost-all-dev
version: ${{ github.sha }}
execute_install_scripts: true
- name: Cache Info
run: |
echo "Cache hit: ${{ steps.apt-cache.outputs.cache-hit }}"
@ -159,6 +162,7 @@ jobs:
### Cache Scoping
Caches are scoped by:
- Package list
- Version string
- Branch (default branch cache available to other branches)
@ -166,6 +170,7 @@ Caches are scoped by:
### Cache Keys
The action generates cache keys based on:
- Package names and versions
- System architecture
- Custom version string
@ -173,6 +178,7 @@ The action generates cache keys based on:
### Cache Invalidation
Caches are invalidated when:
- Package versions change
- Custom version string changes
- Branch cache is cleared
@ -183,7 +189,7 @@ Caches are invalidated when:
```yaml
permissions:
actions: read|write # Required for cache operations
actions: read|write # Required for cache operations
```
### Missing Dependencies
@ -209,6 +215,7 @@ This project is licensed under the Apache License 2.0 - see the [LICENSE](LICENS
## 🔄 Updates and Maintenance
Stay updated:
- Watch this repository for releases
- Check the [CHANGELOG](CHANGELOG.md)
- Follow the [security policy](SECURITY.md)
@ -301,7 +308,7 @@ jobs:
### Edge Cases
This action is able to speed up installs by skipping the number of steps that `apt` uses.
This action is able to speed up installs by skipping the number of steps that `apt` uses.
- This means there will be certain cases that it may not be able to handle like state management of other file configurations outside the package scope.
- In cases that can't be immediately addressed or run counter to the approach of this action, the packages affected should go into their own action `step` and using the normal `apt` utility.

View file

@ -1,124 +1,121 @@
name: "Cache APT Packages"
description: "Install APT based packages and cache them for future runs."
name: Cache APT Packages
description: Install APT based packages and cache them for future runs.
author: awalsh128
branding:
icon: "hard-drive"
color: "green"
icon: hard-drive
color: green
inputs:
packages:
description: "Space delimited list of packages to install. Version can be specified optionally using APT command syntax of <name>=<version> (e.g. xdot=1.2-2)."
description: Space delimited list of packages to install. Version can be specified optionally using APT command syntax of <name>=<version> (e.g. xdot=1.2-2).
required: true
default: ""
version:
description: "Version of cache to load. Each version will have its own cache. Note, all characters except spaces are allowed."
description: Version of cache to load. Each version will have its own cache. Note, all characters except spaces are allowed.
required: false
default: ""
execute_install_scripts:
description: "Execute Debian package pre and post install script upon restore. See README.md caveats for more information."
description: Execute Debian package pre and post install script upon restore. See README.md caveats for more information.
required: false
default: "false"
refresh:
description: "OBSOLETE: Refresh is not used by the action, use version instead."
deprecationMessage: "Refresh is not used by the action, use version instead."
deprecationMessage: Refresh is not used by the action, use version instead.
debug:
description: "Enable debugging when there are issues with action. Minor performance penalty."
description: Enable debugging when there are issues with action. Minor performance penalty.
required: false
default: "false"
outputs:
cache-hit:
description: "A boolean value to indicate a cache was found for the packages requested."
description: A boolean value to indicate a cache was found for the packages requested.
# This compound expression is needed because lhs can be empty.
# Need to output true and false instead of true and nothing.
value: ${{ steps.load-cache.outputs.cache-hit || false }}
package-version-list:
description: "The main requested packages and versions that are installed. Represented as a comma delimited list with equals delimit on the package version (i.e. <package>:<version,<package>:<version>)."
description: The main requested packages and versions that are installed. Represented as a comma delimited list with equals delimit on the package version (i.e. <package>=<version>,<package>=<version>).
value: ${{ steps.post-cache.outputs.package-version-list }}
all-package-version-list:
description: "All the pulled in packages and versions, including dependencies, that are installed. Represented as a comma delimited list with equals delimit on the package version (i.e. <package>:<version,<package>:<version>)."
description: All the pulled in packages and versions, including dependencies, that are installed. Represented as a comma delimited list with equals delimit on the package version (i.e. <package>=<version>,<package>=<version>).
value: ${{ steps.post-cache.outputs.all-package-version-list }}
runs:
using: "composite"
env:
CACHE_DIR: ~/cache-apt-pkgs
GLOBAL_VERSION: 20250824
using: composite
steps:
- id: set-shared-env
shell: bash
run: |
echo "ARCH=${{ runner.arch }}" >> "${GITHUB_ENV}"
echo "BINARY_PATH=${BINARY_PATH}" >> "${GITHUB_ENV}"
echo "CACHE_DIR=~/cache-apt-pkgs" >> "${GITHUB_ENV}"
echo "DEBUG=${{ inputs.debug }}" >> "${GITHUB_ENV}"
echo "GLOBAL_VERSION=20250910" >> "${GITHUB_ENV}"
echo "PACKAGES=${{ inputs.packages }}" >> "${GITHUB_ENV}"
echo "VERSION=${{ inputs.version }}" >> "${GITHUB_ENV}"
env:
BINARY_PATH: ${{ github.action_path }}/scripts/distribute.sh getbinpath ${{ runner.arch }}
- id: install-aptfast
shell: bash
run: |
if ! apt-fast --version > /dev/null 2>&1; then
"Installing apt-fast for optimized installs and updates"
echo "Installing apt-fast for optimized installs and updates" &&
/bin/bash -c "$(curl -sL https://raw.githubusercontent.com/ilikenwf/apt-fast/master/quick-install.sh)"
fi
- id: setup-binary
shell: bash
run: |
BINARY_PATH="${{ github.action_path }}/tools/distribute.sh getbinpath ${{ runner.arch }}"
if [ ! -f "$BINARY_PATH" ]; then
echo "Error: Binary not found at $BINARY_PATH"
echo "Please ensure the action has been properly built and binaries are included in the dist directory"
if [[ ! -f "${BINARY_PATH}" ]]; then
echo "Error: Binary not found at ${BINARY_PATH}"
echo "Please ensure the action has been properly built and binaries are included in the distribute directory"
exit 1
fi
- id: create-cache-key
shell: bash
shell: bash
run: |
${BINARY_PATH} createkey \
-os-arch ${{ runner.arch }} \
-os-arch "${ARCH}" \
-plaintext-path "${CACHE_DIR}/cache_key.txt" \
-ciphertext-path "${CACHE_DIR}/cache_key.md5" \
-version "${{ inputs.version }}" \
-global-version "${GLOBAL_VERSION}" \
${{ inputs.packages }}
echo "cache-key=$(cat $CACHE_DIR/cache_key.md5)" >> $GITHUB_OUTPUT
-version "${VERSION}" \
-global-version "${GLOBAL_VERSION}" \
${PACKAGES}
echo "cache-key=$(cat ${CACHE_DIR}/cache_key.md5)" >> "${GITHUB_OUTPUT}"
- id: load-cache
uses: actions/cache/restore@v4
with:
path: ${CACHE_DIR}
path: ${{ env.CACHE_DIR }}
key: cache-apt-pkgs_${{ steps.create-cache-key.outputs.cache-key }}
- id: post-load-cache
# TODO get this implemented
# -exec-install-scripts ${{ inputs.execute_install_scripts }} \
shell: bash
run: |
if [ "$CACHE_HIT" == "true" ]; then
if [ "${CACHE_HIT}" == "true" ]; then
${BINARY_PATH} restore \
-cache-dir "${CACHE_DIR}" \
-restore-root "/" \
"$PACKAGES"
"${PACKAGES}"
else
${BINARY_PATH} install \
-cache-dir "${CACHE_DIR}"
-version "${{ inputs.version }}" \
-global-version "${GLOBAL_VERSION}"
"$PACKAGES""
fi
echo "package-version-list=$(cat "${CACHE_DIR}/pkgs_args.txt")" >> $GITHUB_OUTPUT
echo "all-package-version-list=$(cat "${CACHE_DIR}/pkgs_installed.txt")" >> $GITHUB_OUTPUT
shell: bash
-cache-dir "${CACHE_DIR}" \
-version "${VERSION}" \
-global-version "${GLOBAL_VERSION}" \
"${PACKAGES}"
fi
echo "package-version-list=\"$(cat "${CACHE_DIR}/pkgs_args.txt")\"" >> "${GITHUB_OUTPUT}"
echo "all-package-version-list=\"$(cat "${CACHE_DIR}/pkgs_installed.txt")\"" >> "${GITHUB_OUTPUT}"
env:
CACHE_HIT: "${{ steps.load-cache.outputs.cache-hit }}"
EXEC_INSTALL_SCRIPTS: "${{ inputs.execute_install_scripts }}"
DEBUG: "${{ inputs.debug }}"
PACKAGES: "${{ inputs.packages }}"
- id: upload-logs
CACHE_HIT: ${{ steps.load-cache.outputs.cache-hit }}
EXEC_INSTALL_SCRIPTS: ${{ inputs.execute_install_scripts }}
- id: upload-artifacts
if: ${{ inputs.debug == 'true' }}
uses: actions/upload-artifact@v4
with:
name: cache-apt-pkgs-logs_${{ env.CACHE_KEY }}
path: ~/cache-apt-pkgs/*.log
- id: save-cache
if: ${{ ! steps.load-cache.outputs.cache-hit }}
uses: actions/cache/save@v4
with:
path: ~/cache-apt-pkgs
key: ${{ steps.load-cache.outputs.cache-primary-key }}
- id: clean-cache
run: |
rm -rf ~/cache-apt-pkgs

View file

@ -1,3 +1,6 @@
// Package main implements the cache-apt-pkgs command line tool.
// It provides functionality to cache and restore APT packages in GitHub Actions,
// with commands for creating cache keys, installing packages, and restoring from cache.
package main
import (
@ -5,37 +8,107 @@ import (
"fmt"
"os"
"path/filepath"
"strings"
"awalsh128.com/cache-apt-pkgs-action/internal/logging"
"awalsh128.com/cache-apt-pkgs-action/internal/pkgs"
)
// ExamplePackages provides a set of sample packages used for testing and documentation.
// It includes rolldice, xdot with a specific version, and libgtk-3-dev.
var ExamplePackages = pkgs.NewPackages(
pkgs.Package{Name: "rolldice"},
pkgs.Package{Name: "xdot", Version: "1.1-2"},
pkgs.Package{Name: "libgtk-3-dev"},
)
// binaryName is the base name of the command executable, used in usage and error messages.
var binaryName = filepath.Base(os.Args[0])
// globalFlags defines the command-line flags that apply to all commands.
// It includes options for verbosity and help documentation.
// NOTE(review): each flag is bound to a throwaway pointer (new(bool)); the
// values are never read from this set. These flags exist only so they can be
// mirrored into each command's flag set (see NewCmd) and recognized/printed
// in help output.
var globalFlags = func() *flag.FlagSet {
	flags := flag.NewFlagSet("global", flag.ExitOnError)
	flags.BoolVar(new(bool), "verbose", false, "Enable verbose logging")
	flags.BoolVar(new(bool), "v", false, "Enable verbose logging (shorthand)")
	flags.BoolVar(new(bool), "help", false, "Show help")
	flags.BoolVar(new(bool), "h", false, "Show help (shorthand)")
	return flags
}()
// usage prints the top-level help text to stderr: the binary usage line,
// every registered command with its description, the global flags, and a
// pointer to per-command help.
func (c *Cmds) usage() {
	fmt.Fprintf(os.Stderr, "usage: %s <command> [flags] [packages]\n\n", binaryName)
	fmt.Fprintf(os.Stderr, "commands:\n")
	// NOTE(review): map iteration order is random in Go, so commands are
	// listed in a nondeterministic order on each invocation.
	for _, cmd := range *c {
		fmt.Fprintf(os.Stderr, " %s: %s\n", cmd.Name, cmd.Description)
	}
	fmt.Fprintf(os.Stderr, "\nflags:\n")
	// Print global flags (from any command, since they are the same)
	globalFlags.VisitAll(func(f *flag.Flag) {
		fmt.Fprintf(os.Stderr, " -%s: %s\n", f.Name, f.Usage)
	})
	fmt.Fprintf(os.Stderr, "\nUse \"%s <command> --help\" for more information about a command.\n", binaryName)
}
// Cmd represents a command-line subcommand with its associated flags and behavior.
// Each command has a name, description, set of flags, and a function to execute the command.
type Cmd struct {
Name string
Description string
Flags *flag.FlagSet
Examples []string // added Examples field for command usage examples
ExamplePackages pkgs.Packages
// Name is the command identifier used in CLI arguments
Name string
// Description explains what the command does
Description string
// Flags contains the command-specific command-line flags
Flags *flag.FlagSet
// Run executes the command with the given packages and returns any errors
Run func(cmd *Cmd, pkgArgs pkgs.Packages) error
Examples []string
ExamplePackages pkgs.Packages
}
// NewCmd creates a new command with the given name, description, examples, and run function.
// It automatically includes global flags and sets up the usage documentation.
// The returned Cmd is ready to be used as a subcommand in the CLI.
func NewCmd(name, description string, examples []string, runFunc func(cmd *Cmd, pkgArgs pkgs.Packages) error) *Cmd {
	flags := flag.NewFlagSet(name, flag.ExitOnError)
	// Mirror every global flag into this command's flag set so the command
	// also accepts them. The flag.Value is shared between the two sets, so a
	// set on either side is visible on both.
	globalFlags.VisitAll(func(f *flag.Flag) {
		flags.Var(f.Value, f.Name, f.Usage)
	})
	// Custom usage writer: usage line, per-flag help, then worked examples.
	flags.Usage = func() {
		fmt.Fprintf(os.Stderr, "usage: %s %s [flags] [packages]\n\n%s\n\n", binaryName, name, description)
		fmt.Fprintf(os.Stderr, "flags:\n")
		flags.VisitAll(func(f *flag.Flag) {
			fmt.Fprintf(os.Stderr, " -%s: %s\n", f.Name, f.Usage)
		})
		fmt.Fprintf(os.Stderr, "\nexamples:\n")
		for _, example := range examples {
			fmt.Fprintf(os.Stderr, " %s %s %s\n", binaryName, name, example)
		}
	}
	return &Cmd{
		Name:            name,
		Description:     description,
		Flags:           flags,
		Run:             runFunc,
		Examples:        examples,
		ExamplePackages: ExamplePackages,
	}
}
// StringFlag returns the string value of a flag by name.
// It panics if the flag does not exist, so ensure the flag exists before calling.
func (c *Cmd) StringFlag(name string) string {
	f := c.Flags.Lookup(name)
	return f.Value.String()
}
// binaryName returns the base name of the command without the path
var binaryName = filepath.Base(os.Args[0])
// Cmds is a collection of subcommands indexed by their names.
// It provides methods for managing and executing CLI subcommands.
type Cmds map[string]*Cmd
// parseFlags processes command line arguments for the command.
// It validates required flags and parses package arguments.
// Returns the parsed package arguments or exits with an error if validation fails.
func (c *Cmd) parseFlags() pkgs.Packages {
logging.Debug("Parsing flags for command %q with args: %v", c.Name, os.Args[2:])
if len(os.Args) < 3 {
logging.Fatalf("command %q requires arguments", c.Name)
}
@ -47,8 +120,11 @@ func (c *Cmd) parseFlags() pkgs.Packages {
// Check for missing required flags
missingFlagNames := []string{}
c.Flags.VisitAll(func(f *flag.Flag) {
// Consider all flags as required
if f.Value.String() == "" && f.DefValue == "" && f.Name != "help" {
// Skip all global flags since they are considered optional
if gf := globalFlags.Lookup(f.Name); gf != nil {
return
}
if f.DefValue == "" && f.Value.String() == "" {
logging.Info("Missing required flag: %s", f.Name)
missingFlagNames = append(missingFlagNames, f.Name)
}
@ -56,15 +132,19 @@ func (c *Cmd) parseFlags() pkgs.Packages {
if len(missingFlagNames) > 0 {
logging.Fatalf("missing required flags for command %q: %s", c.Name, missingFlagNames)
}
logging.Debug("Parsed flags successfully")
// Parse the remaining arguments as package arguments
pkgArgs, err := pkgs.ParsePackageArgs(c.Flags.Args())
if err != nil {
logging.Fatalf("failed to parse package arguments for command %q: %v", c.Name, err)
}
logging.Debug("Parsed package arguments:\n%s", strings.Join(c.Flags.Args(), "\n "))
return pkgArgs
}
// Add registers a new command to the command set.
// Returns an error if a command with the same name already exists.
func (c *Cmds) Add(cmd *Cmd) error {
if _, exists := (*c)[cmd.Name]; exists {
return fmt.Errorf("command %q already exists", cmd.Name)
@ -72,97 +152,41 @@ func (c *Cmds) Add(cmd *Cmd) error {
(*c)[cmd.Name] = cmd
return nil
}
// Get retrieves a command by name.
// Returns the command and true if found, or nil and false if not found.
func (c *Cmds) Get(name string) (*Cmd, bool) {
	if cmd, ok := (*c)[name]; ok {
		return cmd, true
	}
	return nil, false
}
func (c *Cmd) getFlagCount() int {
count := 0
c.Flags.VisitAll(func(f *flag.Flag) {
count++
})
return count
}
func (c *Cmd) help() {
if c.getFlagCount() == 0 {
fmt.Fprintf(os.Stderr, "usage: %s %s [packages]\n\n", binaryName, c.Name)
fmt.Fprintf(os.Stderr, "%s\n\n", c.Description)
} else {
fmt.Fprintf(os.Stderr, "usage: %s %s [flags] [packages]\n\n", binaryName, c.Name)
fmt.Fprintf(os.Stderr, "%s\n\n", c.Description)
fmt.Fprintf(os.Stderr, "Flags:\n")
c.Flags.PrintDefaults()
}
if c.ExamplePackages == nil && len(c.Examples) == 0 {
return
}
fmt.Fprintf(os.Stderr, "\nExamples:\n")
if len(c.Examples) == 0 {
fmt.Fprintf(os.Stderr, " %s %s %s\n", binaryName, c.Name, c.ExamplePackages.String())
return
}
for _, example := range c.Examples {
fmt.Fprintf(
os.Stderr,
" %s %s %s %s\n",
binaryName,
c.Name,
example,
c.ExamplePackages.String(),
)
}
}
func printUsage(cmds Cmds) {
fmt.Fprintf(os.Stderr, "usage: %s <command> [flags] [packages]\n\n", binaryName)
fmt.Fprintf(os.Stderr, "commands:\n")
// Get max length for alignment
maxLen := 0
for name := range cmds {
if len(name) > maxLen {
maxLen = len(name)
}
}
// Print aligned command descriptions
for name, cmd := range cmds {
fmt.Fprintf(os.Stderr, " %-*s %s\n", maxLen, name, cmd.Description)
}
fmt.Fprintf(
os.Stderr,
"\nUse \"%s <command> --help\" for more information about a command\n",
binaryName,
)
}
// Parse processes the command line arguments to determine the command to run
// and its package arguments. Handles help requests and invalid commands.
// Returns the selected command and its parsed package arguments, or exits on error.
func (c *Cmds) Parse() (*Cmd, pkgs.Packages) {
if len(os.Args) < 2 {
fmt.Fprintf(os.Stderr, "error: no command specified\n\n")
printUsage(*c)
c.usage()
os.Exit(1)
}
cmdName := os.Args[1]
if cmdName == "--help" || cmdName == "-h" {
printUsage(*c)
c.usage()
os.Exit(0)
}
cmd, ok := c.Get(cmdName)
if !ok {
fmt.Fprintf(os.Stderr, "error: unknown command %q\n\n", binaryName)
printUsage(*c)
c.usage()
os.Exit(1)
}
// Handle command-specific help
for _, arg := range os.Args[2:] {
if arg == "--help" || arg == "-h" {
cmd.help()
c.usage()
os.Exit(0)
}
}
@ -170,13 +194,15 @@ func (c *Cmds) Parse() (*Cmd, pkgs.Packages) {
pkgArgs := cmd.parseFlags()
if pkgArgs == nil {
fmt.Fprintf(os.Stderr, "error: no package arguments specified for command %q\n\n", cmd.Name)
cmd.help()
cmd.Flags.Usage()
os.Exit(1)
}
return cmd, pkgArgs
}
// CreateCmds initializes a new command set with the provided commands.
// Each command is added to the set, and the resulting set is returned.
func CreateCmds(cmd ...*Cmd) *Cmds {
commands := &Cmds{}
for _, c := range cmd {

View file

@ -0,0 +1,177 @@
package main
import (
"flag"
"os"
"testing"
"awalsh128.com/cache-apt-pkgs-action/internal/pkgs"
)
const (
flagSetName = "test_flag_set_name"
flagName = "test-flag"
flagValue = "test_flag_value"
flagDefaultValue = "test_default_flag_value"
cmdName = "test-command-name"
cmdName1 = "test-command-name1"
cmdName2 = "test-command-name2"
)
// TestCmd_StringFlag verifies that StringFlag returns the current value of a
// registered flag after it has been explicitly set.
func TestCmd_StringFlag(t *testing.T) {
	cmd := &Cmd{
		Name:  cmdName,
		Flags: flag.NewFlagSet(flagSetName, flag.ContinueOnError),
	}
	cmd.Flags.String(flagName, flagDefaultValue, "test flag")

	// Set the flag value directly instead of parsing args; Set can fail for
	// unknown flags, so the error must not be ignored.
	if err := cmd.Flags.Set(flagName, flagValue); err != nil {
		t.Fatalf("Failed to set flag %q: %v", flagName, err)
	}

	result := cmd.StringFlag(flagName)
	if result != flagValue {
		// Report the actual expected value instead of a stale literal.
		t.Errorf("Expected %q, got %q", flagValue, result)
	}
}
// TestCmds_Add verifies that a command can be registered once and that
// registering the same command name again is rejected.
func TestCmds_Add(t *testing.T) {
	// Cmds{} is a composite literal of a map type, so it is already a usable
	// (non-nil) empty map; the previous explicit make() was redundant.
	cmds := &Cmds{}

	cmd := &Cmd{Name: "test"}
	if err := cmds.Add(cmd); err != nil {
		t.Errorf("Unexpected error adding command: %v", err)
	}

	// Try to add the same command again; it must fail.
	if err := cmds.Add(cmd); err == nil {
		t.Error("Expected error when adding duplicate command")
	}
}
// TestCmds_Get verifies lookup of registered and unregistered command names.
func TestCmds_Get(t *testing.T) {
	// Cmds{} is already a usable empty map; no separate make() needed.
	cmds := &Cmds{}

	cmd := &Cmd{Name: cmdName}
	if err := cmds.Add(cmd); err != nil {
		t.Fatalf("Failed to add command %q: %v", cmdName, err)
	}

	retrieved, ok := cmds.Get(cmdName)
	if !ok {
		// Fatal: dereferencing a nil retrieved below would panic otherwise.
		t.Fatalf("Expected to find command '%s'", cmdName)
	}
	if retrieved.Name != cmdName {
		t.Errorf("Expected command name '%s', got '%s'", cmdName, retrieved.Name)
	}

	if _, ok := cmds.Get("nonexistent-cmd"); ok {
		t.Error("Expected not to find command 'nonexistent-cmd'")
	}
}
// TestCreateCmds verifies that CreateCmds registers every command passed to it.
func TestCreateCmds(t *testing.T) {
	first := &Cmd{Name: cmdName1}
	second := &Cmd{Name: cmdName2}

	cmds := CreateCmds(first, second)
	if cmds == nil {
		t.Fatal("CreateCmds returned nil")
	}

	// Both registered commands must be retrievable by name.
	for _, name := range []string{cmdName1, cmdName2} {
		if _, ok := cmds.Get(name); !ok {
			t.Errorf("Expected to find %s", name)
		}
	}
}
// TestCmd_ParseFlags exercises the argument-handling conditions behind
// Parse/parseFlags without invoking them directly, since those code paths
// call os.Exit and would terminate the test process.
func TestCmd_ParseFlags(t *testing.T) {
	origArgs := os.Args
	defer func() { os.Args = origArgs }()

	t.Run("missing command", func(t *testing.T) {
		// Test the condition that would trigger the missing command error
		// without actually calling Parse() which would exit the test process.
		os.Args = []string{binaryName}
		if len(os.Args) < 2 {
			t.Log("Successfully detected missing command condition")
		} else {
			t.Error("Expected os.Args to have fewer than 2 elements")
		}
	})

	const argExample = "test-package"
	const requiredFlagName = "required-flag"

	t.Run("missing required flags", func(t *testing.T) {
		// Parse() eventually calls os.Exit, so test the flag registration
		// logic directly instead.
		cmd := NewCmd(flagSetName, "Test command", []string{argExample}, func(cmd *Cmd, pkgArgs pkgs.Packages) error {
			return nil
		})
		cmd.Flags.String(requiredFlagName, "", "required flag description")

		requiredFlag := cmd.Flags.Lookup(requiredFlagName)
		if requiredFlag == nil {
			// Fatal: the DefValue dereference below would panic otherwise.
			t.Fatal("Expected required-flag to be registered")
		}
		if requiredFlag.DefValue != "" {
			t.Error("Expected required-flag to have empty default value")
		}
	})

	t.Run("missing package arguments", func(t *testing.T) {
		// Test the condition without calling Parse()
		os.Args = []string{binaryName, cmdName}
		if len(os.Args) >= 2 {
			t.Log("Command name would be available, but package args would be missing")
		} else {
			t.Error("Expected at least 2 args for this test")
		}
	})

	const pkgArg1 = "test-package=1.1-beta"
	const pkgArg2 = "test-package=2.0"

	t.Run("valid command with packages", func(t *testing.T) {
		// Test argument parsing without calling the full Parse() method
		os.Args = []string{binaryName, cmdName, pkgArg1, pkgArg2}
		if len(os.Args) >= 4 {
			actualCmdName := os.Args[1]
			actualPkgArgs := os.Args[2:]

			// Compare against cmdName rather than the stale literal "test",
			// which could never match os.Args[1] and always failed.
			if actualCmdName != cmdName {
				t.Errorf("Expected command '%s', got %s", cmdName, actualCmdName)
			}
			if len(actualPkgArgs) != 2 {
				t.Errorf("Expected 2 package args, got %d", len(actualPkgArgs))
			}
		} else {
			t.Error("Expected at least 4 args for this test")
		}
	})

	t.Run("help flag detection", func(t *testing.T) {
		// Test help flag detection logic
		os.Args = []string{binaryName, "--help"}
		if len(os.Args) >= 2 {
			argName := os.Args[1]
			if argName == "--help" || argName == "-h" {
				t.Log("Successfully detected help flag")
			} else {
				t.Errorf("Expected help flag, got %s", argName)
			}
		}
	})
}

View file

@ -1,43 +1,50 @@
package main
import (
"flag"
"fmt"
"path/filepath"
"runtime"
"awalsh128.com/cache-apt-pkgs-action/internal/cache"
"awalsh128.com/cache-apt-pkgs-action/internal/logging"
"awalsh128.com/cache-apt-pkgs-action/internal/pkgs"
)
func createKey(cmd *Cmd, pkgArgs pkgs.Packages) error {
key := cache.Key{
Packages: pkgArgs,
Version: cmd.StringFlag("version"),
GlobalVersion: cmd.StringFlag("global-version"),
OsArch: cmd.StringFlag("os-arch"),
key, err := cache.NewKey(
pkgArgs,
cmd.StringFlag("version"),
cmd.StringFlag("global-version"),
cmd.StringFlag("os-arch"))
if err != nil {
return fmt.Errorf("failed to create cache key: %w", err)
}
logging.Info("Created cache key: %s (%x)", key.String(), key.Hash())
cacheDir := cmd.StringFlag("cache-dir")
plaintextPath := filepath.Join(cacheDir, "cache_key.txt")
ciphertextPath := filepath.Join(cacheDir, "cache_key.md5")
if err := key.Write(
filepath.Join(cacheDir, "cache_key.txt"),
filepath.Join(cacheDir, "cache_key.md5")); err != nil {
return fmt.Errorf("failed to write cache key: %w", err)
plaintextPath,
ciphertextPath); err != nil {
return fmt.Errorf("failed to write cache keys: %w", err)
}
logging.Info("Wrote cache key files:\n %s\n %s", plaintextPath, ciphertextPath)
return nil
}
func GetCreateKeyCmd() *Cmd {
cmd := &Cmd{
Name: "createkey",
Description: "Create a cache key based on the provided options",
Flags: flag.NewFlagSet("createkey", flag.ExitOnError),
Run: createKey,
examples := []string{
"--os-arch amd64 --cache-dir ~/cache_dir --version 1.0.0 --global-version 1",
"--os-arch x86_64 --cache-dir /tmp/cache_dir --version v2 --global-version 2",
}
cmd := NewCmd("createkey", "Create a cache key based on the provided options", examples, createKey)
cmd.Flags.String("os-arch", runtime.GOARCH,
"OS architecture to use in the cache key.\n"+
"Action may be called from different runners in a different OS. This ensures the right one is fetched")
cmd.Flags.String("cache-dir", "", "Directory that holds the cached packages, JSON manifest and package lists in text format")
cmd.Flags.String("plaintext-path", "", "Path to the plaintext cache key file")
cmd.Flags.String("ciphertext-path", "", "Path to the hashed cache key file")
cmd.Flags.String("version", "", "Version of the cache key to force cache invalidation")

View file

@ -0,0 +1,133 @@
package main
import (
"os"
"path/filepath"
"runtime"
"testing"
"awalsh128.com/cache-apt-pkgs-action/internal/pkgs"
)
// TestGetCreateKeyCmd verifies that the createkey command is fully wired up:
// name, description, run function, all expected flags, and the os-arch default.
func TestGetCreateKeyCmd(t *testing.T) {
	cmd := GetCreateKeyCmd()
	if cmd == nil {
		t.Fatal("GetCreateKeyCmd returned nil")
	}

	if cmd.Name != "createkey" {
		t.Errorf("Expected command name 'createkey', got '%s'", cmd.Name)
	}
	if cmd.Description == "" {
		t.Error("Expected non-empty description")
	}
	if cmd.Flags == nil {
		t.Fatal("Expected flags to be initialized")
	}
	if cmd.Run == nil {
		t.Fatal("Expected Run function to be set")
	}

	// Every flag the command depends on must be registered.
	for _, name := range []string{
		"os-arch",
		"plaintext-path",
		"ciphertext-path",
		"version",
		"global-version",
		"cache-dir",
	} {
		if cmd.Flags.Lookup(name) == nil {
			t.Errorf("Expected flag '%s' to be defined", name)
		}
	}

	// os-arch should default to the architecture of the current runtime.
	if f := cmd.Flags.Lookup("os-arch"); f != nil && f.DefValue != runtime.GOARCH {
		t.Errorf(
			"Expected os-arch default to be '%s', got '%s'",
			runtime.GOARCH,
			f.DefValue,
		)
	}
}
// TestCreateKey_Success verifies that createKey writes both the plaintext and
// hashed cache key files into the cache directory.
func TestCreateKey_Success(t *testing.T) {
	// t.TempDir creates a per-test directory and cleans it up automatically,
	// replacing the manual MkdirTemp + defer RemoveAll dance.
	tmpDir := t.TempDir()

	// Create a mock command with the required flags set.
	cmd := GetCreateKeyCmd()
	for name, value := range map[string]string{
		"version":        "1.0",
		"global-version": "1.0",
		"os-arch":        "amd64",
		"cache-dir":      tmpDir,
	} {
		if err := cmd.Flags.Set(name, value); err != nil {
			t.Fatalf("Failed to set flag %q: %v", name, err)
		}
	}

	// Create test packages
	packages := pkgs.NewPackages(pkgs.Package{Name: "test-package"})

	// Run the createKey function
	if err := createKey(cmd, packages); err != nil {
		t.Errorf("createKey failed: %v", err)
	}

	// Verify that both cache key files were created.
	for _, file := range []string{"cache_key.txt", "cache_key.md5"} {
		if _, err := os.Stat(filepath.Join(tmpDir, file)); os.IsNotExist(err) {
			t.Errorf("%s was not created", file)
		}
	}
}
// TestCreateKey_InvalidCacheDir verifies that createKey surfaces an error
// when the cache directory cannot be written to.
func TestCreateKey_InvalidCacheDir(t *testing.T) {
	cmd := GetCreateKeyCmd()
	cmd.Flags.Set("version", "1.0")
	cmd.Flags.Set("global-version", "1.0")
	cmd.Flags.Set("os-arch", "amd64")
	cmd.Flags.Set("cache-dir", "/nonexistent/directory")

	testPkgs := pkgs.NewPackages(pkgs.Package{Name: "test-package"})

	if err := createKey(cmd, testPkgs); err == nil {
		t.Error("Expected error when using invalid cache directory")
	}
}
func TestCreateKey_EmptyPackages(t *testing.T) {
tmpDir, err := os.MkdirTemp("", "cache_test")
if err != nil {
t.Fatalf("Failed to create temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
cmd := GetCreateKeyCmd()
cmd.Flags.Set("version", "1.0")
cmd.Flags.Set("global-version", "1.0")
cmd.Flags.Set("os-arch", "amd64")
cmd.Flags.Set("cache-dir", tmpDir)
// Empty packages
packages := pkgs.NewPackages()
err = createKey(cmd, packages)
if err != nil {
t.Errorf("createKey should handle empty packages, got error: %v", err)
}
}

View file

@ -5,6 +5,7 @@ import (
"fmt"
"path/filepath"
"runtime"
"strings"
"time"
"awalsh128.com/cache-apt-pkgs-action/internal/cache"
@ -18,19 +19,21 @@ func install(cmd *Cmd, pkgArgs pkgs.Packages) error {
return fmt.Errorf("error initializing APT: %v", err)
}
logging.Info("Installing packages.")
logging.Debug("Package list: %v.", pkgArgs)
logging.Info("Installing packages:\n%s.", strings.Join(pkgArgs.StringArray(), "\n "))
installedPkgs, err := apt.Install(pkgArgs)
if err != nil {
return fmt.Errorf("error installing packages: %v", err)
}
manifestKey := cache.Key{
Packages: pkgArgs,
Version: cmd.StringFlag("version"),
GlobalVersion: cmd.StringFlag("global-version"),
OsArch: runtime.GOARCH,
manifestKey, err := cache.NewKey(
pkgArgs,
cmd.StringFlag("version"),
cmd.StringFlag("global-version"),
runtime.GOARCH,
)
if err != nil {
return fmt.Errorf("error creating manifest key: %v", err)
}
pkgManifests := make([]cache.ManifestPackage, installedPkgs.Len())
@ -40,6 +43,7 @@ func install(cmd *Cmd, pkgArgs pkgs.Packages) error {
if err != nil {
return err
}
logging.Debug("Package %s installed files:\n%s", pkg.String(), strings.Join(files, "\n"))
pkgManifests[i] = cache.ManifestPackage{
Package: *pkg,
Filepaths: files,
@ -52,10 +56,11 @@ func install(cmd *Cmd, pkgArgs pkgs.Packages) error {
}
manifestPath := filepath.Join(cmd.StringFlag("cache-dir"), "manifest.json")
logging.Info("Writing manifest to %s.", manifestPath)
if err := cache.Write(manifestPath, manifest); err != nil {
return fmt.Errorf("error writing manifest to %s: %v", manifestPath, err)
}
logging.Info("Writing manifest to %s.", manifestPath)
logging.Info("Wrote manifest to %s.", manifestPath)
logging.Info("Completed package installation.")
return nil
}

View file

@ -0,0 +1,111 @@
package main
import (
"os"
"path/filepath"
"testing"
"awalsh128.com/cache-apt-pkgs-action/internal/pkgs"
)
// TestGetInstallCmd sanity-checks the install command's metadata and flags.
func TestGetInstallCmd(t *testing.T) {
	cmd := GetInstallCmd()
	if cmd == nil {
		t.Fatal("GetInstallCmd returned nil")
	}
	if got := cmd.Name; got != "install" {
		t.Errorf("Expected command name 'install', got '%s'", got)
	}
	if cmd.Description == "" {
		t.Error("Expected non-empty description")
	}
	if cmd.Flags == nil {
		t.Fatal("Expected flags to be initialized")
	}
	if cmd.Run == nil {
		t.Fatal("Expected Run function to be set")
	}
	if cmd.ExamplePackages == nil {
		t.Error("Expected ExamplePackages to be set")
	}
	if len(cmd.Examples) == 0 {
		t.Error("Expected Examples to be set")
	}
	// Every flag the install step depends on must be registered.
	for _, flagName := range []string{"cache-dir", "version", "global-version"} {
		if cmd.Flags.Lookup(flagName) == nil {
			t.Errorf("Expected flag '%s' to be defined", flagName)
		}
	}
}
// Note: Testing the actual install function requires APT and system-level access
// This test focuses on the command structure and error handling
func TestInstall_Structure(t *testing.T) {
	cmd := GetInstallCmd()

	workDir, err := os.MkdirTemp("", "install_test")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(workDir)

	for flag, value := range map[string]string{
		"cache-dir":      workDir,
		"version":        "1.0",
		"global-version": "1.0",
	} {
		cmd.Flags.Set(flag, value)
	}

	testPkgs := pkgs.NewPackages(pkgs.Package{Name: "test-package"})

	// APT is likely unavailable here; the point is that install is callable
	// and does not panic.
	switch err := install(cmd, testPkgs); err {
	case nil:
		// On success, the manifest should normally exist.
		manifestFile := filepath.Join(workDir, "manifest.json")
		if _, statErr := os.Stat(manifestFile); os.IsNotExist(statErr) {
			t.Log(
				"Note: install succeeded but manifest.json not found - this may be expected in test environment",
			)
		}
	default:
		t.Logf("install function returned expected error in test environment: %v", err)
	}
}
// TestInstall_EmptyPackages checks install neither panics nor misbehaves when
// given an empty package list.
func TestInstall_EmptyPackages(t *testing.T) {
	cmd := GetInstallCmd()

	workDir, err := os.MkdirTemp("", "install_test")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	defer os.RemoveAll(workDir)

	for flag, value := range map[string]string{
		"cache-dir":      workDir,
		"version":        "1.0",
		"global-version": "1.0",
	} {
		cmd.Flags.Set(flag, value)
	}

	// Expected to fail without APT; a panic would fail the test harness.
	if err := install(cmd, pkgs.NewPackages()); err != nil {
		t.Logf("install with empty packages returned expected error: %v", err)
	}
}

View file

@ -5,7 +5,7 @@ import (
)
func main() {
logging.Init("cache_apt_pkgs", true)
logging.Init(true)
commands := CreateCmds(
GetCreateKeyCmd(),

View file

@ -0,0 +1,154 @@
package main
import (
"os"
"os/exec"
"path/filepath"
"testing"
)
// Integration test for the real commands used by main.
func TestIntegration_MainCommands(t *testing.T) {
	// Build the CLI binary into a per-test temp dir.
	bin := filepath.Join(t.TempDir(), "cache-apt-pkgs")
	build := exec.Command("go", "build", "-o", bin, ".")
	build.Dir = "."
	if err := build.Run(); err != nil {
		t.Fatalf("Failed to build binary: %v", err)
	}

	// Each case records whether the invocation should exit non-zero.
	cases := []struct {
		name        string
		args        []string
		expectError bool
	}{
		{name: "help", args: []string{"--help"}, expectError: false},          // --help exits with 0
		{name: "no_args", args: []string{}, expectError: true},               // no command specified
		{name: "unknown_command", args: []string{"unknown"}, expectError: true},
		{name: "createkey_help", args: []string{"createkey", "--help"}, expectError: false}, // command help exits with 0
		{name: "install_help", args: []string{"install", "--help"}, expectError: false},
		{name: "restore_help", args: []string{"restore", "--help"}, expectError: false},
		{name: "validate_help", args: []string{"validate", "--help"}, expectError: false},
	}

	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			runErr := exec.Command(bin, c.args...).Run()
			switch {
			case c.expectError && runErr == nil:
				t.Errorf("Expected error for %s but got none", c.name)
			case !c.expectError && runErr != nil:
				t.Errorf("Expected no error for %s but got: %v", c.name, runErr)
			}
		})
	}
}
// Test that commands can be executed (they may fail due to missing dependencies, but shouldn't crash)
func TestIntegration_CommandExecution(t *testing.T) {
	workDir := t.TempDir()

	// Build the CLI binary once for all subtests.
	bin := filepath.Join(workDir, "cache-apt-pkgs")
	build := exec.Command("go", "build", "-o", bin, ".")
	build.Dir = "."
	if err := build.Run(); err != nil {
		t.Fatalf("Failed to build binary: %v", err)
	}

	// createkey has no APT dependency, so it can be exercised end-to-end.
	t.Run("createkey_execution", func(t *testing.T) {
		cacheDir := filepath.Join(workDir, "cache")
		if err := os.MkdirAll(cacheDir, 0755); err != nil {
			t.Fatalf("Failed to create cache dir: %v", err)
		}
		run := exec.Command(bin, "createkey",
			"--cache-dir", cacheDir,
			"--version", "1.0",
			"--global-version", "1.0",
			"test-package")
		if err := run.Run(); err != nil {
			t.Logf("createkey execution failed (may be expected in test environment): %v", err)
			return
		}
		// On success both key artifacts must exist.
		for _, artifact := range []string{"cache_key.txt", "cache_key.md5"} {
			if _, err := os.Stat(filepath.Join(cacheDir, artifact)); err != nil {
				t.Errorf("%s was not created: %v", artifact, err)
			}
		}
	})

	// The remaining commands need APT; only verify they exit without crashing.
	aptCommands := []struct {
		name string
		args []string
	}{
		{"validate", []string{"validate", "test-package"}},
		{
			"install",
			[]string{
				"install",
				"--cache-dir", workDir,
				"--version", "1.0",
				"--global-version", "1.0",
				"test-package",
			},
		},
		{"restore", []string{"restore", "--cache-dir", workDir, "test-package"}},
	}
	for _, c := range aptCommands {
		c := c
		t.Run(c.name+"_no_crash", func(t *testing.T) {
			if err := exec.Command(bin, c.args...).Run(); err != nil {
				t.Logf("%s command failed as expected in test environment: %v", c.name, err)
			}
		})
	}
}

View file

@ -0,0 +1,53 @@
package main
import (
"testing"
)
// TestMain_CommandStructure verifies every subcommand wired up by main is
// registered and retrievable by name.
func TestMain_CommandStructure(t *testing.T) {
	commands := CreateCmds(
		GetCreateKeyCmd(),
		GetInstallCmd(),
		GetRestoreCmd(),
		GetValidateCmd(),
	)
	if commands == nil {
		t.Fatal("CreateCmds returned nil")
	}
	for _, cmdName := range []string{"createkey", "install", "restore", "validate"} {
		if _, ok := commands.Get(cmdName); !ok {
			t.Errorf("Expected command '%s' to be available", cmdName)
		}
	}
}
// TestMain_AllCommandsHaveRequiredFields checks each registered command has a
// name, description, flag set, and run function.
func TestMain_AllCommandsHaveRequiredFields(t *testing.T) {
	commands := CreateCmds(
		GetCreateKeyCmd(),
		GetInstallCmd(),
		GetRestoreCmd(),
		GetValidateCmd(),
	)
	for name, c := range *commands {
		name, c := name, c // pin loop variables for the subtest closure
		t.Run(name, func(t *testing.T) {
			if c.Name == "" {
				t.Error("Command name should not be empty")
			}
			if c.Description == "" {
				t.Error("Command description should not be empty")
			}
			if c.Flags == nil {
				t.Error("Command flags should not be nil")
			}
			if c.Run == nil {
				t.Error("Command Run function should not be nil")
			}
		})
	}
}

View file

@ -30,5 +30,6 @@ func GetRestoreCmd() *Cmd {
"--cache-dir /tmp/cache_dir --restore-root /",
}
cmd.ExamplePackages = ExamplePackages
return cmd
}

View file

@ -0,0 +1,88 @@
package main
import (
"testing"
"awalsh128.com/cache-apt-pkgs-action/internal/pkgs"
)
// TestGetRestoreCmd sanity-checks the restore command's metadata, flags, and
// flag defaults.
func TestGetRestoreCmd(t *testing.T) {
	cmd := GetRestoreCmd()
	if cmd == nil {
		t.Fatal("GetRestoreCmd returned nil")
	}
	if got := cmd.Name; got != "restore" {
		t.Errorf("Expected command name 'restore', got '%s'", got)
	}
	if cmd.Description == "" {
		t.Error("Expected non-empty description")
	}
	if cmd.Flags == nil {
		t.Fatal("Expected flags to be initialized")
	}
	if cmd.Run == nil {
		t.Fatal("Expected Run function to be set")
	}
	if cmd.ExamplePackages == nil {
		t.Error("Expected ExamplePackages to be set")
	}
	if len(cmd.Examples) == 0 {
		t.Error("Expected Examples to be set")
	}

	// All restore-specific flags must be registered.
	for _, flagName := range []string{"cache-dir", "restore-root", "execute-scripts"} {
		if cmd.Flags.Lookup(flagName) == nil {
			t.Errorf("Expected flag '%s' to be defined", flagName)
		}
	}

	// Defaults must match the documented behavior.
	if f := cmd.Flags.Lookup("restore-root"); f != nil && f.DefValue != "/" {
		t.Errorf("Expected restore-root default to be '/', got '%s'", f.DefValue)
	}
	if f := cmd.Flags.Lookup("execute-scripts"); f != nil && f.DefValue != "false" {
		t.Errorf(
			"Expected execute-scripts default to be 'false', got '%s'",
			f.DefValue,
		)
	}
}
// TestRestore_NotImplemented pins the current stub behavior of restore: it
// must fail with the exact "not implemented" message.
func TestRestore_NotImplemented(t *testing.T) {
	cmd := GetRestoreCmd()
	testPkgs := pkgs.NewPackages(pkgs.Package{Name: "test-package"})
	err := restore(cmd, testPkgs)
	if err == nil {
		t.Error("Expected error from unimplemented restore function")
	}
	const want = "restorePackages not implemented"
	if got := err.Error(); got != want {
		t.Errorf("Expected error message '%s', got '%s'", want, got)
	}
}
func TestRestore_EmptyPackages(t *testing.T) {
cmd := GetRestoreCmd()
packages := pkgs.NewPackages()
// Even with empty packages, restore should return not implemented error
err := restore(cmd, packages)
if err == nil {
t.Error("Expected error from unimplemented restore function")
}
}

View file

@ -0,0 +1,67 @@
package main
import (
"testing"
"awalsh128.com/cache-apt-pkgs-action/internal/pkgs"
)
// TestGetValidateCmd sanity-checks the validate command's metadata.
func TestGetValidateCmd(t *testing.T) {
	cmd := GetValidateCmd()
	if cmd == nil {
		t.Fatal("GetValidateCmd returned nil")
	}
	if got := cmd.Name; got != "validate" {
		t.Errorf("Expected command name 'validate', got '%s'", got)
	}
	if cmd.Description == "" {
		t.Error("Expected non-empty description")
	}
	if cmd.Flags == nil {
		t.Fatal("Expected flags to be initialized")
	}
	if cmd.Run == nil {
		t.Fatal("Expected Run function to be set")
	}
	if cmd.ExamplePackages == nil {
		t.Error("Expected ExamplePackages to be set")
	}
}
// Note: Testing the actual validate function requires APT which may not be available in test environment
// This test focuses on the command structure and basic functionality
func TestValidate_EmptyPackages(t *testing.T) {
cmd := GetValidateCmd()
packages := pkgs.NewPackages()
// With no packages, validation should succeed (no packages to validate)
err := validate(cmd, packages)
if err != nil {
t.Errorf("validate with empty packages should succeed, got error: %v", err)
}
}
// Mock test that doesn't require APT to be installed
func TestValidate_Structure(t *testing.T) {
	cmd := GetValidateCmd()
	if cmd.Run == nil {
		t.Error("Expected Run function to be set")
	}
	// Structure-only check: packages for validation can be constructed.
	testPkgs := pkgs.NewPackages(pkgs.Package{Name: "test-package"})
	if got := testPkgs.Len(); got != 1 {
		t.Error("Failed to create test packages")
	}
	// Actual validation is not exercised here: it requires APT, which is
	// typically unavailable in the test environment.
}

163
cmd/parse_action/action.sh Executable file
View file

@ -0,0 +1,163 @@
#!/bin/bash
####################################################################################################
#
# Name: Cache APT Packages
# Description: Install APT based packages and cache them for future runs.
# Author: awalsh128
#
# Branding:
# Icon: hard-drive
# Color: green
#
# Inputs:
# Packages:
# Description: Space delimited list of packages to install. Version can be specified optionally using APT command syntax of <name>=<version> (e.g. xdot=1.2-2).
# Required: true
# Default:
# Version:
# Description: Version of cache to load. Each version will have its own cache. Note, all characters except spaces are allowed.
# Required: false
# Default:
# Execute Install Scripts:
# Description: Execute Debian package pre and post install script upon restore. See README.md caveats for more information.
# Required: false
# Default: false
# Refresh:
# Description: OBSOLETE: Refresh is not used by the action, use version instead.
# Required: false
# Default:
# Deprecation Message: Refresh is not used by the action, use version instead.
# Debug:
# Description: Enable debugging when there are issues with action. Minor performance penalty.
# Required: false
# Default: false
#
#
# Outputs:
# Cache Hit:
# Description: A boolean value to indicate a cache was found for the packages requested.
# Value: ${{ steps.load-cache.outputs.cache-hit || false }}
# Package Version List:
#   Description: The main requested packages and versions that are installed. Represented as a comma delimited list with equals delimit on the package version (i.e. <package>:<version>,<package>:<version>).
# Value: ${{ steps.post-cache.outputs.package-version-list }}
# All Package Version List:
#   Description: All the pulled in packages and versions, including dependencies, that are installed. Represented as a comma delimited list with equals delimit on the package version (i.e. <package>:<version>,<package>:<version>).
# Value: ${{ steps.post-cache.outputs.all-package-version-list }}
#
####################################################################################################
set -e
# Simulated GitHub Actions inputs (normally provided via ${{ inputs.* }}).
INPUTS_EXECUTE_INSTALL_SCRIPTS="false"
# OBSOLETE input, kept only for parity with action.yml.
INPUTS_REFRESH="false"
INPUTS_DEBUG="false"
# Simulated runner / action context values.
RUNNER_ARCH="X86_64"
GITHUB_ACTION_PATH="../../"
INPUTS_PACKAGES="xdot,rolldice"
INPUTS_VERSION="0"
#===================================================================================================
# Step ID: set-shared-env
#===================================================================================================
# Resolve the action binary path by actually executing the distribute helper.
# The original stored the command text as a string and then copied the unset
# ${BINARY_PATH}, so every later ${BINARY_PATH} reference was empty.
STEP_SET_SHARED_ENV_ENV_BINARY_PATH="$("${GITHUB_ACTION_PATH}/scripts/distribute.sh" getbinpath "${RUNNER_ARCH}")"
GH_ENV_ARCH="${RUNNER_ARCH}"
GH_ENV_BINARY_PATH="${STEP_SET_SHARED_ENV_ENV_BINARY_PATH}"
# Use ${HOME}: a quoted '~' is never tilde-expanded and would create a
# literal directory named '~'.
GH_ENV_CACHE_DIR="${HOME}/cache-apt-pkgs"
GH_ENV_DEBUG="${INPUTS_DEBUG}"
GH_ENV_GLOBAL_VERSION="20250910"
GH_ENV_PACKAGES="${INPUTS_PACKAGES}"
GH_ENV_VERSION="${INPUTS_VERSION}"
# Plain names read by the later steps; the converter emitted only the
# GH_ENV_* forms, leaving these unset.
ARCH="${GH_ENV_ARCH}"
BINARY_PATH="${GH_ENV_BINARY_PATH}"
CACHE_DIR="${GH_ENV_CACHE_DIR}"
DEBUG="${GH_ENV_DEBUG}"
GLOBAL_VERSION="${GH_ENV_GLOBAL_VERSION}"
PACKAGES="${GH_ENV_PACKAGES}"
VERSION="${GH_ENV_VERSION}"
#===================================================================================================
# Step ID: install-aptfast
#===================================================================================================
# Install apt-fast only when it is not already usable.
if ! apt-fast --version > /dev/null 2>&1; then
  echo "Installing apt-fast for optimized installs and updates"
  # Bootstrap via the upstream quick-install script.
  /bin/bash -c "$(curl -sL https://raw.githubusercontent.com/ilikenwf/apt-fast/master/quick-install.sh)"
fi
#===================================================================================================
# Step ID: setup-binary
#===================================================================================================
# Abort early when the distributed action binary is missing.
if [[ ! -f "${BINARY_PATH}" ]]; then
  echo "Error: Binary not found at ${BINARY_PATH}"
  echo "Please ensure the action has been properly built and binaries are included in the distribute directory"
  exit 1
fi
#===================================================================================================
# Step ID: create-cache-key
#===================================================================================================
# NOTE(review): assumes ARCH/CACHE_DIR/VERSION/GLOBAL_VERSION/PACKAGES are
# defined by the set-shared-env step — confirm the converter emits them.
"${BINARY_PATH}" createkey \
  -os-arch "${ARCH}" \
  -plaintext-path "${CACHE_DIR}/cache_key.txt" \
  -ciphertext-path "${CACHE_DIR}/cache_key.md5" \
  -version "${VERSION}" \
  -global-version "${GLOBAL_VERSION}" \
  ${PACKAGES} # intentionally unquoted so a space-delimited package list word-splits
# Quote the path inside $(cat ...); the original was unquoted and would break
# on a CACHE_DIR containing spaces.
GH_OUTPUT_CREATE_CACHE_KEY_CACHE_KEY="$(cat "${CACHE_DIR}/cache_key.md5")"
#===================================================================================================
# Step ID: load-cache
#===================================================================================================
# The converter left raw GitHub expressions (${{ env.CACHE_DIR }},
# ${{ steps.create-cache-key.outputs.cache-key }}) embedded in bash parameter
# expansions, which is a "bad substitution" at runtime. Use the shell
# variables produced by the earlier steps instead.
STEP_LOAD_CACHE_WITH_PATH="${CACHE_DIR}"
STEP_LOAD_CACHE_WITH_KEY="cache-apt-pkgs_${GH_OUTPUT_CREATE_CACHE_KEY_CACHE_KEY}"
if [[ -d "${STEP_LOAD_CACHE_WITH_PATH}" ]]; then
  OUTPUT_CACHE_HIT=true
else
  OUTPUT_CACHE_HIT=false
  mkdir -p "${STEP_LOAD_CACHE_WITH_PATH}"
fi
# NO HANDLER FOUND for actions/cache/restore@v4
#===================================================================================================
# Step ID: post-load-cache
#===================================================================================================
# The converter left ${{ steps.load-cache.outputs.cache-hit }} untranslated
# and never defined CACHE_HIT; wire both to the OUTPUT_CACHE_HIT computed by
# the load-cache step above.
STEP_POST_LOAD_CACHE_ENV_CACHE_HIT="${OUTPUT_CACHE_HIT}"
STEP_POST_LOAD_CACHE_ENV_EXEC_INSTALL_SCRIPTS="${INPUTS_EXECUTE_INSTALL_SCRIPTS}"
CACHE_HIT="${STEP_POST_LOAD_CACHE_ENV_CACHE_HIT}"
if [[ "${CACHE_HIT}" == "true" ]]; then
  "${BINARY_PATH}" restore \
    -cache-dir "${CACHE_DIR}" \
    -restore-root "/" \
    "${PACKAGES}"
else
  "${BINARY_PATH}" install \
    -cache-dir "${CACHE_DIR}" \
    -version "${VERSION}" \
    -global-version "${GLOBAL_VERSION}" \
    "${PACKAGES}"
fi
# NOTE(review): PACKAGES is passed quoted as one argument here but unquoted in
# create-cache-key — confirm which form the binary expects.
GH_OUTPUT_POST_LOAD_CACHE_PACKAGE_VERSION_LIST="\"$(cat "${CACHE_DIR}/pkgs_args.txt")\""
GH_OUTPUT_POST_LOAD_CACHE_ALL_PACKAGE_VERSION_LIST="\"$(cat "${CACHE_DIR}/pkgs_installed.txt")\""
#===================================================================================================
# Step ID: upload-artifacts
#===================================================================================================
# NO HANDLER FOUND for actions/upload-artifact@v4
#===================================================================================================
# Step ID: save-cache
#===================================================================================================
# NO HANDLER FOUND for actions/cache/save@v4
#===================================================================================================
# Step ID: clean-cache
#===================================================================================================
# Remove the working cache directory; '--' guards against option-like names.
rm -rf -- "${HOME}/cache-apt-pkgs"

View file

@ -0,0 +1,99 @@
package main
import (
"fmt"
"strings"
"mvdan.cc/sh/v3/syntax"
)
// ParseBashToAST parses a bash script string into an AST using the bash
// language variant with comments retained.
func ParseBashToAST(script string) (*syntax.File, error) {
	p := syntax.NewParser(syntax.KeepComments(true), syntax.Variant(syntax.LangBash))
	file, err := p.Parse(strings.NewReader(script), "")
	if err != nil {
		return nil, fmt.Errorf("failed to parse bash script: %v", err)
	}
	return file, nil
}
// AnalyzeBashScript provides analysis of a bash script including variables, functions, and commands
func AnalyzeBashScript(script string) (map[string]interface{}, error) {
	file, err := ParseBashToAST(script)
	if err != nil {
		return nil, err
	}

	varSet := make(map[string]struct{})
	funcNames := make([]string, 0)
	cmdNames := make([]string, 0)

	// Single AST walk collecting assignments, function declarations and the
	// literal first word of each command invocation.
	syntax.Walk(file, func(node syntax.Node) bool {
		switch n := node.(type) {
		case *syntax.Assign:
			if n.Name != nil {
				varSet[n.Name.Value] = struct{}{}
			}
		case *syntax.FuncDecl:
			if n.Name != nil {
				funcNames = append(funcNames, n.Name.Value)
			}
		case *syntax.CallExpr:
			if len(n.Args) == 0 {
				return true
			}
			var word strings.Builder
			for _, part := range n.Args[0].Parts {
				if lit, ok := part.(*syntax.Lit); ok {
					word.WriteString(lit.Value)
				}
			}
			if word.Len() > 0 {
				cmdNames = append(cmdNames, word.String())
			}
		}
		return true
	})

	// Flatten the variable set into a slice for better JSON output.
	varNames := make([]string, 0, len(varSet))
	for name := range varSet {
		varNames = append(varNames, name)
	}

	return map[string]interface{}{
		"variables": varNames,
		"functions": funcNames,
		"commands":  cmdNames,
	}, nil
}
// ParseAndGetAst converts the action to a bash script, analyzes the result,
// and returns the script prefixed with an analysis summary comment block.
func ParseAndGetAst(action Action) (string, error) {
	script := NewBashConverter(action).Convert()

	analysis, err := AnalyzeBashScript(script)
	if err != nil {
		return script, fmt.Errorf("script analysis error: %v", err)
	}

	var out strings.Builder
	out.WriteString("#!/bin/bash\n\n")
	out.WriteString("# Script Analysis:\n")
	fmt.Fprintf(&out, "# Variables: %v\n", analysis["variables"])
	fmt.Fprintf(&out, "# Functions: %v\n", analysis["functions"])
	fmt.Fprintf(&out, "# Commands: %v\n\n", analysis["commands"])
	out.WriteString(script)
	return out.String(), nil
}

View file

@ -0,0 +1,251 @@
package main
import (
	"bufio"
	"fmt"
	"regexp"
	"sort"
	"strings"
)
// dedent removes common leading indentation from non-empty lines.
// It also normalizes CRLF -> LF and strips a single leading newline.
func dedent(s string) string {
	s = strings.ReplaceAll(s, "\r\n", "\n")
	s = strings.TrimPrefix(s, "\n")
	lines := strings.Split(s, "\n")

	// Find the smallest indentation over all non-blank lines; -1 = none seen.
	common := -1
	for _, line := range lines {
		if strings.TrimSpace(line) == "" {
			continue
		}
		width := len(line) - len(strings.TrimLeft(line, " \t"))
		if common == -1 || width < common {
			common = width
		}
	}
	if common <= 0 {
		return s
	}

	for i, line := range lines {
		if len(line) >= common {
			lines[i] = line[common:]
		} else {
			// Shorter (whitespace-only) lines simply lose their indent.
			lines[i] = strings.TrimLeft(line, " \t")
		}
	}
	return strings.Join(lines, "\n")
}
// ScriptBuilder accumulates the text of the generated bash script.
type ScriptBuilder struct {
	textBuilder strings.Builder // backing buffer for all Write* methods
}
// WriteComment appends the formatted text with every line prefixed by "# ".
func (s *ScriptBuilder) WriteComment(format string, a ...any) {
	scanner := bufio.NewScanner(strings.NewReader(fmt.Sprintf(format, a...)))
	for scanner.Scan() {
		s.textBuilder.WriteString("# ")
		s.textBuilder.WriteString(scanner.Text())
		s.textBuilder.WriteByte('\n')
	}
}
// WriteCommentSection writes a comment framed by "#===…" rule lines.
func (s *ScriptBuilder) WriteCommentSection(format string, a ...any) {
	rule := "#" + strings.Repeat("=", 99)
	s.WriteBlock("\n\n" + rule)
	s.WriteComment(format, a...)
	s.WriteBlock(rule + "\n")
}
// WriteBlock dedents the template, applies the format args, and appends the
// result followed by a newline.
func (s *ScriptBuilder) WriteBlock(format string, a ...any) {
	text := fmt.Sprintf(dedent(format), a...)
	fmt.Fprintln(&s.textBuilder, text)
}
// String returns the script text accumulated so far.
func (s *ScriptBuilder) String() string {
	return s.textBuilder.String()
}
// BashConverter renders a parsed GitHub Action into a standalone bash script.
type BashConverter struct {
	action        Action        // the parsed action.yml being converted
	scriptBuilder ScriptBuilder // output accumulator
	githubVars    githubVars    // known ${{ ... }} expressions and their shell equivalents
}
// NewBashConverter builds a converter seeded with the simulated GitHub
// context values used when rendering the action as a bash script.
func NewBashConverter(action Action) *BashConverter {
	seed := []githubVar{
		newGithubVar("runner.arch", "X86_64"),
		newGithubVar("github.action_path", "../../"),
		newGithubVar("inputs.packages", "xdot,rolldice"),
		newGithubVar("inputs.version", "0"),
		newGithubVar("inputs.global_version", ""),
		newGithubVar("inputs.execute_install_scripts", "false"),
		newGithubVar("inputs.refresh", "false"),
		newGithubVar("inputs.debug", "false"),
	}
	vars := make(githubVars, len(seed))
	for _, v := range seed {
		vars[v.name] = v
	}
	return &BashConverter{
		action:        action,
		scriptBuilder: ScriptBuilder{},
		githubVars:    vars,
	}
}
// Convert renders the whole action and returns the generated bash script.
func (b *BashConverter) Convert() string {
	b.handleAction()
	return b.scriptBuilder.String()
}
// convertShellLines applies convertShellLine to every line of a step's run
// script and rejoins the result.
func (b *BashConverter) convertShellLines(step Step, lines string) string {
	var converted []string
	sc := bufio.NewScanner(strings.NewReader(lines))
	for sc.Scan() {
		converted = append(converted, b.convertShellLine(step, sc.Text()))
	}
	return strings.Join(converted, "\n")
}
// convertShellLine rewrites one line of a step's shell: known ${{ ... }}
// expressions become shell variable references, and
// `echo "name=value" >> $GITHUB_ENV|$GITHUB_OUTPUT` lines become plain
// GH_ENV_* / GH_OUTPUT_<step>_* assignments.
func (b *BashConverter) convertShellLine(step Step, line string) string {
	line = b.githubVars.convert(line)

	envRe := regexp.MustCompile(`^\s*echo\s+"([\w\-_]+)=(.*)"\s*>>\s*.*GITHUB_ENV.*`)
	if m := envRe.FindStringSubmatch(line); m != nil {
		return fmt.Sprintf(`GH_ENV_%s="%s"`, convertToShellVar(m[1]), b.githubVars.convert(m[2]))
	}

	outRe := regexp.MustCompile(`^\s*echo\s+"([\w\-_]+)=(.*)"\s*>>\s*.*GITHUB_OUTPUT.*`)
	if m := outRe.FindStringSubmatch(line); m != nil {
		return fmt.Sprintf(
			`GH_OUTPUT_%s_%s="%s"`,
			convertToShellVar(step.ID),
			convertToShellVar(m[1]),
			b.githubVars.convert(m[2]),
		)
	}
	return line
}
// handleExternalAction emits bash emulating a known external action step.
// When no handler is registered for step.Uses, a marker comment is written
// instead (the original wrote the marker unconditionally, even after a
// handler ran).
func (b *BashConverter) handleExternalAction(step Step) {
	handlers := map[string]func(){
		"actions/cache/restore@v4": func() {
			path := b.convertShellLine(step, step.With["path"])
			key := b.convertShellLine(step, step.With["key"])
			shellVarPrefix := "STEP_" + convertToShellVar(step.ID) + "_WITH"
			pathVar := fmt.Sprintf("%s_PATH", shellVarPrefix)
			keyVar := fmt.Sprintf("%s_KEY", shellVarPrefix)
			// The ${...} slots must reference the shell *variable names*;
			// the original interpolated the raw key value, generating
			// invalid bash such as ${cache-apt-pkgs_${{ ... }}}.
			b.scriptBuilder.WriteBlock(`
			%s="%s"
			%s="%s"
			if [[ -d "${%s}" ]]; then
			  OUTPUT_CACHE_HIT=true
			else
			  OUTPUT_CACHE_HIT=false
			  mkdir "${%s}"
			fi
			`, pathVar, path, keyVar, key, pathVar, pathVar)
		},
	}
	if handler, ok := handlers[step.Uses]; ok {
		handler()
		return
	}
	b.scriptBuilder.WriteComment("NO HANDLER FOUND for %s", step.Uses)
}
// convertToShellVar maps a GitHub expression name to a shell variable name:
// dots and dashes become underscores and the result is uppercased.
func convertToShellVar(name string) string {
	replacer := strings.NewReplacer(".", "_", "-", "_")
	return strings.ToUpper(replacer.Replace(name))
}
// githubVar associates a GitHub Actions expression name with its shell
// variable name and the literal value used in the generated script.
type githubVar struct {
	name      string // GitHub expression name, e.g. "runner.arch"
	shellName string // converted shell variable name, e.g. "RUNNER_ARCH"
	shellVal  string // literal value assigned in the generated script
}
// newGithubVar builds a githubVar, deriving the shell name from the
// expression name.
func newGithubVar(name, shellVal string) githubVar {
	v := githubVar{name: name, shellVal: shellVal}
	v.shellName = convertToShellVar(name)
	return v
}
type githubVars map[string]githubVar
// convert replaces every known "${{ name }}" GitHub expression in line with a
// "${SHELL_NAME}" reference; unknown expressions are left untouched.
func (v *githubVars) convert(line string) string {
	quoted := make([]string, 0, len(*v))
	for name := range *v {
		quoted = append(quoted, regexp.QuoteMeta(name))
	}
	// One alternation over all known names, allowing surrounding whitespace.
	re := regexp.MustCompile(
		fmt.Sprintf(`\${{[[:space:]]*(%s)[[:space:]]*}}`, strings.Join(quoted, "|")))
	return re.ReplaceAllStringFunc(line, func(match string) string {
		name := re.FindStringSubmatch(match)[1]
		gvar, ok := (*v)[name]
		if !ok {
			return match
		}
		return fmt.Sprintf("${%s}", gvar.shellName)
	})
}
// handleAction emits the full script: header comment, shared variable
// assignments, then one commented section per step.
func (b *BashConverter) handleAction() {
	b.scriptBuilder.WriteBlock("#!/bin/bash\n")
	b.scriptBuilder.WriteBlock(strings.Repeat("#", 100) + "\n#")
	b.scriptBuilder.WriteComment("%s", b.action.ShortString())
	b.scriptBuilder.WriteBlock(strings.Repeat("#", 100) + "\n")
	b.scriptBuilder.WriteBlock("set -e\n")
	// Sort variable names so the generated script is deterministic; ranging
	// over the map directly reshuffled the assignments on every run.
	names := make([]string, 0, len(b.githubVars))
	for name := range b.githubVars {
		names = append(names, name)
	}
	sort.Strings(names)
	for _, name := range names {
		v := b.githubVars[name]
		if v.shellVal != "" {
			b.scriptBuilder.WriteBlock(`%s="%s"`, v.shellName, v.shellVal)
		}
	}
	for _, step := range b.action.Runs.Steps {
		if step.ID != "" {
			b.scriptBuilder.WriteCommentSection("Step ID: %s", step.ID)
		} else {
			b.scriptBuilder.WriteCommentSection("Step ID: n/a")
		}
		if step.Uses != "" {
			b.handleExternalAction(step)
		}
		if len(step.Env) > 0 {
			for k, v := range step.Env {
				b.scriptBuilder.WriteBlock(
					`STEP_%s_ENV_%s="%s"`,
					convertToShellVar(step.ID),
					convertToShellVar(k),
					b.githubVars.convert(v),
				)
			}
		}
		if step.Shell != "" && step.Shell != "bash" {
			b.scriptBuilder.WriteComment(
				"Note: Original shell was %q, but this script uses bash.\n",
				step.Shell,
			)
		}
		if step.Run != "" {
			b.scriptBuilder.WriteBlock("%s\n", b.convertShellLines(step, step.Run))
		}
	}
}

30
cmd/parse_action/main.go Normal file
View file

@ -0,0 +1,30 @@
package main
import (
"fmt"
"log"
"os"
)
// Foo converts the action to bash, prints it, and writes it to action.sh as
// an executable file.
func Foo(action Action) {
	script := NewBashConverter(action).Convert()
	fmt.Println(script)

	const outPath = "action.sh"
	if err := os.WriteFile(outPath, []byte(script), 0o755); err != nil {
		fmt.Println("write error:", err)
		os.Exit(1)
	}
	fmt.Printf("Wrote script to %s\n", outPath)
}
// main parses the action definition, converts it to an annotated bash
// script, and prints the result.
func main() {
	action, err := Parse("../../action.yml")
	if err != nil {
		log.Fatal(err)
	}
	txt, err := ParseAndGetAst(action)
	if err != nil {
		// The original discarded this error and printed a partial result.
		log.Fatal(err)
	}
	fmt.Println(txt)
}

236
cmd/parse_action/parse.go Normal file
View file

@ -0,0 +1,236 @@
package main
import (
"fmt"
"os"
"strings"
"gopkg.in/yaml.v3"
)
// indentSize is the number of spaces emitted per indentation level by indent().
const indentSize = 2

// Action represents the GitHub Action configuration structure
type Action struct {
	Name        string   `yaml:"name"`
	Description string   `yaml:"description"`
	Author      string   `yaml:"author"`
	Branding    Branding `yaml:"branding"`
	Inputs      Inputs   `yaml:"inputs"`
	Outputs     Outputs  `yaml:"outputs"`
	Runs        Runs     `yaml:"runs"`
}

// Branding represents the action's branding configuration
type Branding struct {
	Icon  string `yaml:"icon"`
	Color string `yaml:"color"`
}

// Inputs represents all input parameters for the action
type Inputs struct {
	Packages              Input `yaml:"packages"`
	Version               Input `yaml:"version"`
	ExecuteInstallScripts Input `yaml:"execute_install_scripts"`
	Refresh               Input `yaml:"refresh"`
	Debug                 Input `yaml:"debug"`
}

// Input represents a single input parameter configuration
type Input struct {
	Description        string `yaml:"description"`
	Required           bool   `yaml:"required"`
	Default            string `yaml:"default"`
	DeprecationMessage string `yaml:"deprecationMessage,omitempty"`
}

// Outputs represents all output parameters from the action
type Outputs struct {
	CacheHit              Output `yaml:"cache-hit"`
	PackageVersionList    Output `yaml:"package-version-list"`
	AllPackageVersionList Output `yaml:"all-package-version-list"`
}

// Output represents a single output parameter configuration
type Output struct {
	Description string `yaml:"description"`
	Value       string `yaml:"value"`
}

// Runs represents the action's execution configuration
type Runs struct {
	Using string            `yaml:"using"`
	Env   map[string]string `yaml:"env"`
	Steps []Step            `yaml:"steps"`
}

// Step represents a single step in the action's execution
type Step struct {
	ID    string            `yaml:"id"`
	Uses  string            `yaml:"uses"`
	With  map[string]string `yaml:"with"`
	Shell string            `yaml:"shell"`
	Run   string            `yaml:"run"`
	Env   map[string]string `yaml:"env"`
}
// String implements fmt.Stringer for Action
func (a Action) String() string {
	var b strings.Builder
	b.WriteString(a.ShortString())
	b.WriteString("\nRuns:\n" + indent(a.Runs.String(), 1))
	return b.String()
}
// ShortString implements fmt.Stringer for Action but with runs trimmed out
func (a Action) ShortString() string {
	var b strings.Builder
	fmt.Fprintf(&b, "Name: %s\n", a.Name)
	fmt.Fprintf(&b, "Description: %s\n", a.Description)
	fmt.Fprintf(&b, "Author: %s\n", a.Author)
	b.WriteString("\nBranding:\n" + indent(a.Branding.String(), 1))
	b.WriteString("\nInputs:\n" + indent(a.Inputs.String(), 1))
	b.WriteString("\nOutputs:\n" + indent(a.Outputs.String(), 1))
	return b.String()
}
// String implements fmt.Stringer for Branding
func (b Branding) String() string {
	return "Icon: " + b.Icon + "\nColor: " + b.Color
}
// String implements fmt.Stringer for Inputs.
// Each input is rendered as a labeled section with its body indented one level.
func (i Inputs) String() string {
	sections := []struct {
		label string
		body  string
	}{
		{"Packages", i.Packages.String()},
		{"Version", i.Version.String()},
		{"Execute Install Scripts", i.ExecuteInstallScripts.String()},
		{"Refresh", i.Refresh.String()},
		{"Debug", i.Debug.String()},
	}
	var sb strings.Builder
	for _, s := range sections {
		sb.WriteString(s.label + ":\n")
		sb.WriteString(indent(s.body, 1))
	}
	return sb.String()
}
// String implements fmt.Stringer for Input.
// The deprecation message line is appended only when one is set.
func (i Input) String() string {
	s := fmt.Sprintf("Description: %s\nRequired: %v\nDefault: %s",
		i.Description, i.Required, i.Default)
	if i.DeprecationMessage != "" {
		s += fmt.Sprintf("\nDeprecation Message: %s", i.DeprecationMessage)
	}
	return s
}
// String implements fmt.Stringer for Outputs.
// Each output is rendered as a labeled section with its body indented one level.
func (o Outputs) String() string {
	var sb strings.Builder
	for _, section := range []struct {
		label string
		out   Output
	}{
		{"Cache Hit", o.CacheHit},
		{"Package Version List", o.PackageVersionList},
		{"All Package Version List", o.AllPackageVersionList},
	} {
		sb.WriteString(section.label + ":\n")
		sb.WriteString(indent(section.out.String(), 1))
	}
	return sb.String()
}
// String implements fmt.Stringer for Output.
// Output is two lines: the description then the value expression.
func (o Output) String() string {
	return "Description: " + o.Description + "\nValue: " + o.Value
}
// String implements fmt.Stringer for Runs.
//
// Fix: Go map iteration order is randomized, so ranging directly over
// r.Env made the rendered output nondeterministic between calls.
// Environment variables are now printed in sorted key order.
func (r Runs) String() string {
	var b strings.Builder
	b.WriteString(fmt.Sprintf("Using: %s\n", r.Using))
	b.WriteString("Environment:\n")
	keys := make([]string, 0, len(r.Env))
	for k := range r.Env {
		keys = append(keys, k)
	}
	// Insertion sort keeps the output deterministic without requiring the
	// sort package to be imported; Env maps are small.
	for i := 1; i < len(keys); i++ {
		for j := i; j > 0 && keys[j] < keys[j-1]; j-- {
			keys[j], keys[j-1] = keys[j-1], keys[j]
		}
	}
	for _, k := range keys {
		b.WriteString(indent(fmt.Sprintf("%s: %s\n", k, r.Env[k]), 1))
	}
	b.WriteString("Steps:\n")
	for _, step := range r.Steps {
		b.WriteString(indent(step.String()+"\n", 1))
	}
	return b.String()
}
// String implements fmt.Stringer for Step.
//
// Fix: Go map iteration order is randomized, so ranging directly over
// s.With made the rendered output nondeterministic; "With" entries are
// now printed in sorted key order.
//
// NOTE(review): the Uses and Env fields are not rendered here, matching
// the original behavior — confirm this omission is intentional.
func (s Step) String() string {
	var b strings.Builder
	if s.ID != "" {
		b.WriteString(fmt.Sprintf("ID: %s\n", s.ID))
	}
	if len(s.With) > 0 {
		b.WriteString("With:\n")
		keys := make([]string, 0, len(s.With))
		for k := range s.With {
			keys = append(keys, k)
		}
		// Insertion sort keeps output deterministic without requiring the
		// sort package to be imported; With maps are small.
		for i := 1; i < len(keys); i++ {
			for j := i; j > 0 && keys[j] < keys[j-1]; j-- {
				keys[j], keys[j-1] = keys[j-1], keys[j]
			}
		}
		for _, k := range keys {
			b.WriteString(fmt.Sprintf("%s: %s\n", k, s.With[k]))
		}
	}
	if s.Shell != "" {
		b.WriteString(fmt.Sprintf("Shell: %s\n", s.Shell))
	}
	if s.Run != "" {
		b.WriteString(fmt.Sprintf("Run:\n%s", indent(s.Run, 1)))
	}
	return strings.TrimSuffix(b.String(), "\n")
}
// indent adds the specified number of indentation levels to each line of
// the input string. Empty lines are left unpadded. A non-empty input always
// gains a trailing newline; an empty input is returned unchanged.
func indent(s string, level int) string {
	if s == "" {
		return s
	}
	pad := strings.Repeat(" ", level*indentSize)
	lines := strings.Split(s, "\n")
	padded := make([]string, len(lines))
	for i, line := range lines {
		if line == "" {
			padded[i] = line
		} else {
			padded[i] = pad + line
		}
	}
	return strings.Join(padded, "\n") + "\n"
}
// Parse reads the action metadata file at yamlFilePath and decodes it into
// an Action. It returns a zero Action and a wrapped error when the file
// cannot be read or the YAML cannot be decoded.
//
// Fixes: error strings were capitalized (against Go convention, staticcheck
// ST1005) and used %v, which discards the error chain; both now use
// lowercase messages and %w so callers can errors.Is/As the cause.
func Parse(yamlFilePath string) (Action, error) {
	data, err := os.ReadFile(yamlFilePath)
	if err != nil {
		return Action{}, fmt.Errorf("reading %s: %w", yamlFilePath, err)
	}
	var action Action
	if err := yaml.Unmarshal(data, &action); err != nil {
		return Action{}, fmt.Errorf("parsing YAML from %s: %w", yamlFilePath, err)
	}
	return action, nil
}

21
dev.md
View file

@ -1,21 +0,0 @@
# cache-apt-pkgs-action - Development
To develop and run tests you will need to setup your system.
## Environment
1. The project requires Go 1.23 or later.
2. Set GO111MODULE to auto:
```bash
# One-time setup
go env -w GO111MODULE=auto
# Or use the provided setup script
./scripts/setup_dev.sh
```
3. The project includes a `.env` file with required settings.
## Action Testing

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

8
go.mod
View file

@ -1,16 +1,16 @@
module awalsh128.com/cache-apt-pkgs-action
go 1.23
toolchain go1.23.4
go 1.24
require (
github.com/awalsh128/syspkg v0.1.5
github.com/stretchr/testify v1.11.0
)
require mvdan.cc/sh/v3 v3.12.0 // indirect
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
gopkg.in/yaml.v3 v3.0.1
)

2
go.sum
View file

@ -10,3 +10,5 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
mvdan.cc/sh/v3 v3.12.0 h1:ejKUR7ONP5bb+UGHGEG/k9V5+pRVIyD+LsZz7o8KHrI=
mvdan.cc/sh/v3 v3.12.0/go.mod h1:Se6Cj17eYSn+sNooLZiEUnNNmNxg0imoYlTu4CyaGyg=

296
internal/cache/key.go vendored
View file

@ -1,8 +1,25 @@
// Package cache provides caching functionality for APT packages and their metadata.
// Package cache provides functionality for managing APT package cache keys.
// It handles the creation, serialization, and validation of cache keys that uniquely
// identify sets of packages for caching in GitHub Actions.
//
// Example usage:
//
// // Create a new cache key
// key := cache.NewKey(packages, "v1.0", "v2", "amd64")
//
// // Write the key to files
// err := key.Write("key.txt", "key.md5")
// if err != nil {
// log.Fatal(err)
// }
//
// // Read and validate a key
// plaintext, hash, err := cache.ReadKey("key.txt", "key.md5")
package cache
import (
"crypto/md5"
"encoding/json"
"fmt"
"os"
@ -12,49 +29,274 @@ import (
// Key represents a unique identifier for a package cache entry.
// It combines package information with version and architecture details to create
// a deterministic cache key.
// a deterministic cache key. Keys are immutable after creation and generate
// consistent hashes by maintaining sorted package order and using a fixed string format.
type Key struct {
// Packages is a sorted list of packages to be cached
// This is guaranteed by the pkgs.Packages interface
Packages pkgs.Packages
// Version is the user-specified cache version
Version string
// GlobalVersion is the action's global version, used for cache invalidation
GlobalVersion string
// OsArch is the target architecture (e.g., amd64, arm64)
OsArch string
packages pkgs.Packages // Sorted list of packages to be cached
version string // User-specified cache version
globalVersion string // Action's global version for cache invalidation
osArch string // Target architecture (e.g., amd64, arm64)
}
// PlainText returns a human-readable string representation of the cache key.
// File permissions for written key files
const (
keyFileMode = 0644
)
// NewKey creates a new cache key with the specified parameters.
// The packages are already sorted when constructed to provide a
// deterministic order and hash. globalVersion and osArch are required;
// an error naming every missing field is returned when either is empty.
func NewKey(packages pkgs.Packages, version, globalVersion, osArch string) (Key, error) {
	var missing []string
	if globalVersion == "" {
		missing = append(missing, "globalVersion")
	}
	if osArch == "" {
		missing = append(missing, "osArch")
	}
	if len(missing) != 0 {
		return Key{}, fmt.Errorf("missing required fields: %v", missing)
	}
	k := Key{
		packages:      packages,
		version:       version,
		globalVersion: globalVersion,
		osArch:        osArch,
	}
	return k, nil
}
// Packages returns the packages associated with this cache key.
// The returned value is guaranteed to be sorted.
func (k Key) Packages() pkgs.Packages {
	return k.packages
}

// Version returns the user-specified cache version.
func (k Key) Version() string {
	return k.version
}

// GlobalVersion returns the action's global version used for cache invalidation.
func (k Key) GlobalVersion() string {
	return k.globalVersion
}

// OsArch returns the target architecture (e.g., amd64, arm64).
func (k Key) OsArch() string {
	return k.osArch
}
// String returns a human-readable string representation of the cache key.
// The output format is deterministic since Packages are guaranteed to be sorted.
func (k *Key) PlainText() string {
// This method implements the fmt.Stringer interface.
func (k Key) String() string {
return fmt.Sprintf("Packages: '%s', Version: '%s', GlobalVersion: '%s', OsArch: '%s'",
k.Packages.String(), k.Version, k.GlobalVersion, k.OsArch)
k.packages.String(), k.version, k.globalVersion, k.osArch)
}
// Hash generates a deterministic MD5 hash of the key's contents.
// This hash is used as the actual cache key for storage and lookup.
func (k *Key) Hash() []byte {
hash := md5.Sum([]byte(k.PlainText()))
//
// Note: MD5 is used here for speed and determinism, not cryptographic security.
// The hash is based on the string representation to ensure consistency.
func (k Key) Hash() []byte {
hash := md5.Sum([]byte(k.String()))
return hash[:]
}
// WriteError represents an error that occurred during key writing operations.
// It provides context about which file and operation failed, along with the
// underlying error. This type implements both the error interface and error
// unwrapping, so callers may use errors.Is/errors.As on the cause.
type WriteError struct {
	Path      string // File path that failed
	Operation string // Operation being performed (plaintext/hash)
	Err       error  // Underlying error that occurred
}

// Error implements the error interface, combining operation, path, and cause.
func (e *WriteError) Error() string {
	return fmt.Sprintf("failed to write %s to %s: %v", e.Operation, e.Path, e.Err)
}

// Unwrap returns the underlying error for error unwrapping.
func (e *WriteError) Unwrap() error {
	return e.Err
}
// Write stores both the plaintext and hashed versions of the cache key to files.
// This allows for both human inspection and fast cache lookups.
func (k *Key) Write(plaintextPath string, ciphertextPath string) error {
keyText := k.PlainText()
logging.Info("Writing cache key plaintext to %s.", plaintextPath)
if err := os.WriteFile(plaintextPath, []byte(keyText), 0644); err != nil {
return fmt.Errorf("write failed to %s: %w", plaintextPath, err)
//
// The method writes:
// - Human-readable key representation to plaintextPath
// - Binary hash of the key to hashPath
//
// Both operations must succeed for the method to return nil.
// If either operation fails, no partial state is left behind.
func (k Key) Write(plaintextPath, hashPath string) error {
// Write plaintext representation
if err := k.writePlaintext(plaintextPath); err != nil {
return &WriteError{
Path: plaintextPath,
Operation: "plaintext key",
Err: err,
}
}
logging.Info("Completed writing cache key plaintext.")
keyHash := k.Hash()
logging.Info("Writing cache key hash to %s.", ciphertextPath)
if err := os.WriteFile(ciphertextPath, keyHash[:], 0644); err != nil {
return fmt.Errorf("write failed to %s: %w", ciphertextPath, err)
// Write hash representation
if err := k.writeHash(hashPath); err != nil {
// Attempt cleanup of plaintext file on hash write failure
if removeErr := os.Remove(plaintextPath); removeErr != nil {
logging.Info("Failed to cleanup plaintext file %s after hash write failure: %v",
plaintextPath, removeErr)
}
return &WriteError{
Path: hashPath,
Operation: "hash key",
Err: err,
}
}
logging.Info("Completed writing cache key hash.")
return nil
}
// writePlaintext writes the human-readable key representation to the
// specified path, logging before and after the write.
func (k Key) writePlaintext(path string) error {
	logging.Info("Writing cache key plaintext to %s", path)
	if writeErr := os.WriteFile(path, []byte(k.String()), keyFileMode); writeErr != nil {
		return fmt.Errorf("write plaintext: %w", writeErr)
	}
	logging.Info("Completed writing cache key plaintext")
	return nil
}
// writeHash writes the binary MD5 hash of the key to the specified path,
// logging before and after the write.
func (k Key) writeHash(path string) error {
	logging.Info("Writing cache key hash to %s", path)
	if writeErr := os.WriteFile(path, k.Hash(), keyFileMode); writeErr != nil {
		return fmt.Errorf("write hash: %w", writeErr)
	}
	logging.Info("Completed writing cache key hash")
	return nil
}
// ReadKey reads and validates a cache key from the filesystem.
// It returns the plaintext content and hash, or an error if validation fails.
//
// This function verifies that:
//   - Both files exist and are readable regular files
//   - The hash file contains exactly 16 bytes (MD5 hash size)
//   - The computed hash of plaintext matches the stored hash
//
// Fix: the previous existence checks discarded the underlying os.Stat error
// and reported every failure as "does not exist", which was misleading for
// permission errors or directory paths. The real cause is now wrapped.
func ReadKey(plaintextPath, hashPath string) (plaintext string, hash []byte, err error) {
	if err := checkFileExists(plaintextPath); err != nil {
		return "", nil, fmt.Errorf("plaintext file not accessible at %s: %w", plaintextPath, err)
	}
	if err := checkFileExists(hashPath); err != nil {
		return "", nil, fmt.Errorf("hash file not accessible at %s: %w", hashPath, err)
	}

	// Read plaintext
	plaintextBytes, err := os.ReadFile(plaintextPath)
	if err != nil {
		return "", nil, fmt.Errorf("read plaintext from %s: %w", plaintextPath, err)
	}
	plaintext = string(plaintextBytes)

	// Read stored hash
	storedHash, err := os.ReadFile(hashPath)
	if err != nil {
		return "", nil, fmt.Errorf("read hash from %s: %w", hashPath, err)
	}

	// Validate hash length
	if len(storedHash) != md5.Size {
		return "", nil, fmt.Errorf("invalid hash length in %s: got %d bytes, want %d",
			hashPath, len(storedHash), md5.Size)
	}

	// Verify hash matches plaintext
	computedHash := md5.Sum(plaintextBytes)
	if string(computedHash[:]) != string(storedHash) {
		return "", nil, fmt.Errorf("hash mismatch: stored hash does not match plaintext content")
	}

	return plaintext, storedHash, nil
}
// KeyExists checks if both plaintext and hash key files exist at the
// specified paths. It returns true only if both files exist and are
// accessible regular files.
func KeyExists(plaintextPath, hashPath string) bool {
	return checkFileExists(plaintextPath) == nil &&
		checkFileExists(hashPath) == nil
}
// checkFileExists verifies that a file exists and is accessible.
func checkFileExists(path string) error {
info, err := os.Stat(path)
if err != nil {
return err
}
if info.IsDir() {
return fmt.Errorf("path is a directory: %s", path)
}
return nil
}
// keyJSON is a helper struct for JSON serialization of Key.
// It uses exported fields and concrete types that JSON can handle,
// since Key's fields are unexported and packages is an interface-like type.
type keyJSON struct {
	Packages      []pkgs.Package `json:"packages"`
	Version       string         `json:"version"`
	GlobalVersion string         `json:"globalVersion"`
	OsArch        string         `json:"osArch"`
}

// MarshalJSON implements custom JSON marshaling for Key.
// It flattens the packages collection into a concrete slice before encoding.
func (k Key) MarshalJSON() ([]byte, error) {
	// Convert packages to slice
	// (assumes Len/Get expose the collection in its sorted order — the Key
	// docs state packages are kept sorted; verify against pkgs.Packages).
	pkgSlice := make([]pkgs.Package, k.packages.Len())
	for i := 0; i < k.packages.Len(); i++ {
		pkgSlice[i] = *k.packages.Get(i)
	}

	keyData := keyJSON{
		Packages:      pkgSlice,
		Version:       k.version,
		GlobalVersion: k.globalVersion,
		OsArch:        k.osArch,
	}

	return json.Marshal(keyData)
}

// UnmarshalJSON implements custom JSON unmarshaling for Key.
// It decodes into keyJSON and rebuilds the packages collection.
// Note: this bypasses NewKey, so no required-field validation is applied here.
func (k *Key) UnmarshalJSON(data []byte) error {
	var keyData keyJSON
	if err := json.Unmarshal(data, &keyData); err != nil {
		return err
	}

	// Convert slice back to Packages interface
	packages := pkgs.NewPackages(keyData.Packages...)

	k.packages = packages
	k.version = keyData.Version
	k.globalVersion = keyData.GlobalVersion
	k.osArch = keyData.OsArch

	return nil
}

View file

@ -2,220 +2,322 @@ package cache
import (
"bytes"
"fmt"
"os"
"path"
"path/filepath"
"testing"
"awalsh128.com/cache-apt-pkgs-action/internal/pkgs"
)
const (
pkg1 = "xdot=1.3-1"
pkg2 = "rolldice=1.16-1build3"
package1 = "xdot=1.3-1"
package2 = "rolldice=1.16-1build3"
version1 = "test1"
version2 = "test2"
version = "test"
globalV1 = "v1"
globalV2 = "v2"
arch1 = "amd64"
arch2 = "x86"
globalVersion1 = "v1"
globalVersion2 = "v2"
archAmd64 = "amd64"
archX86 = "x86"
)
func TestKey_PlainText(t *testing.T) {
emptyKey := Key{
Packages: pkgs.NewPackagesFromStrings(),
Version: "",
GlobalVersion: "",
OsArch: "",
}
singleKey := Key{
Packages: pkgs.NewPackagesFromStrings(pkg1),
Version: version,
GlobalVersion: globalV2,
OsArch: arch1,
}
multiKey := Key{
Packages: pkgs.NewPackagesFromStrings(pkg1, pkg2),
Version: version,
GlobalVersion: globalV2,
OsArch: arch1,
}
//==============================================================================
// Helper Functions
//==============================================================================
cases := []struct {
name string
key Key
expected string
// createKey builds a Key from raw package strings, failing the test on error.
func createKey(t *testing.T, packages []string, version, globalVersion, osArch string) Key {
	t.Helper()
	k, err := NewKey(pkgs.NewPackagesFromStrings(packages...), version, globalVersion, osArch)
	if err != nil {
		t.Fatalf("Failed to create key: %v", err)
	}
	return k
}
// assertStringEquals fails the test when key.String() differs from expected.
func assertStringEquals(t *testing.T, key Key, expected string) {
	t.Helper()
	if got := key.String(); got != expected {
		t.Errorf("String() = %q, expected %q", got, expected)
	}
}
// assertHashesEqual fails the test when the two keys hash differently.
func assertHashesEqual(t *testing.T, key1, key2 Key) {
	t.Helper()
	h1, h2 := key1.Hash(), key2.Hash()
	if !bytes.Equal(h1, h2) {
		t.Errorf("Hashes should be equal: key1=%x, key2=%x", h1, h2)
	}
}
// assertHashesDifferent fails the test when the two keys hash identically.
func assertHashesDifferent(t *testing.T, key1, key2 Key) {
	t.Helper()
	h1, h2 := key1.Hash(), key2.Hash()
	if bytes.Equal(h1, h2) {
		t.Errorf("Hashes should be different but were equal: %x", h1)
	}
}
// assertFileContentEquals reads filePath and fails the test when its bytes
// differ from expected; an unreadable file is a fatal test failure.
func assertFileContentEquals(t *testing.T, filePath string, expected []byte) {
	t.Helper()
	actual, err := os.ReadFile(filePath)
	if err != nil {
		t.Fatalf("Failed to read file %s: %v", filePath, err)
	}
	if !bytes.Equal(actual, expected) {
		t.Errorf("File content mismatch in %s: actual %q, expected %q", filePath, actual, expected)
	}
}
//==============================================================================
// String Tests
//==============================================================================
// TestKeyString_WithEmptyKey_ReturnsError verifies that NewKey rejects a key
// whose required fields (globalVersion, osArch) are all empty.
func TestKeyString_WithEmptyKey_ReturnsError(t *testing.T) {
	// Arrange & Act
	_, err := NewKey(
		pkgs.NewPackagesFromStrings(),
		"",
		"",
		"",
	)

	// Assert
	if err == nil {
		t.Error("Expected error but got nil")
	}
}
// TestKeyString_WithSinglePackage_ReturnsFormattedString verifies the exact
// String() format for a key holding one package.
func TestKeyString_WithSinglePackage_ReturnsFormattedString(t *testing.T) {
	// Arrange
	key := createKey(t, []string{package1}, version1, globalVersion2, archAmd64)
	expected := fmt.Sprintf(
		"Packages: '%s', Version: '%s', GlobalVersion: '%s', OsArch: '%s'",
		package1,
		version1,
		globalVersion2,
		archAmd64,
	)

	// Act & Assert
	assertStringEquals(t, key, expected)
}
// TestKeyString_WithMultiplePackages_ReturnsCommaSeparatedString verifies
// that String() renders multiple packages in sorted order.
func TestKeyString_WithMultiplePackages_ReturnsCommaSeparatedString(t *testing.T) {
	// Arrange
	key := createKey(
		t,
		[]string{package1, package2}, // xdot=1.3-1, rolldice=1.16-1build3
		version1,
		globalVersion2,
		archAmd64,
	)
	// Packages are sorted, so "rolldice" comes before "xdot"
	expected := fmt.Sprintf(
		"Packages: '%s %s', Version: '%s', GlobalVersion: '%s', OsArch: '%s'",
		package2,
		package1,
		version1,
		globalVersion2,
		archAmd64,
	)

	// Act & Assert
	assertStringEquals(t, key, expected)
}
//==============================================================================
// Hash Tests
//==============================================================================
// TestKeyHash_WithIdenticalKeys_ReturnsSameHash verifies that Hash() is
// deterministic: two keys built from identical inputs hash identically.
func TestKeyHash_WithIdenticalKeys_ReturnsSameHash(t *testing.T) {
	// Arrange
	key1 := createKey(t, []string{package1}, version1, globalVersion2, archAmd64)
	key2 := createKey(t, []string{package1}, version1, globalVersion2, archAmd64)

	// Act & Assert
	assertHashesEqual(t, key1, key2)
}
func TestKeyHash_WithDifferences_ReturnsDifferentHash(t *testing.T) {
tests := []struct {
name string
key1 Key
key2 Key
}{
{
name: "Empty key",
key: emptyKey,
expected: "Packages: '', Version: '', GlobalVersion: '', OsArch: ''",
name: "Different packages",
key1: createKey(t, []string{package1}, version1, globalVersion1, archAmd64),
key2: createKey(t, []string{package2}, version1, globalVersion1, archAmd64),
},
{
name: "Single package",
key: singleKey,
expected: "Packages: 'xdot=1.3-1', Version: 'test', GlobalVersion: 'v2', OsArch: 'amd64'",
name: "Different versions",
key1: createKey(t, []string{package1}, version1, globalVersion1, archAmd64),
key2: createKey(t, []string{package2}, version2, globalVersion1, archAmd64),
},
{
name: "Multiple packages",
key: multiKey,
expected: "Packages: 'xdot=1.3-1,rolldice=1.16-1build3', Version: 'test', GlobalVersion: 'v2', OsArch: 'amd64'",
name: "Different global versions",
key1: createKey(t, []string{package1}, version1, globalVersion1, archAmd64),
key2: createKey(t, []string{package2}, version1, globalVersion2, archAmd64),
},
{
name: "Different architectures",
key1: createKey(t, []string{package1}, version1, globalVersion1, archAmd64),
key2: createKey(t, []string{package1}, version1, globalVersion2, archX86),
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
result := c.key.PlainText()
if result != c.expected {
t.Errorf("PlainText() = %v, want %v", result, c.expected)
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assertHashesDifferent(t, tt.key1, tt.key2)
})
}
}
func TestKey_Hash(t *testing.T) {
cases := []struct {
name string
key1 Key
key2 Key
wantSame bool
}{
{
name: "Same keys hash to same value",
key1: Key{
Packages: pkgs.NewPackagesFromStrings(pkg1),
Version: version,
GlobalVersion: globalV2,
OsArch: arch1,
},
key2: Key{
Packages: pkgs.NewPackagesFromStrings(pkg1),
Version: version,
GlobalVersion: globalV2,
OsArch: arch1,
},
wantSame: true,
},
{
name: "Different packages hash to different values",
key1: Key{
Packages: pkgs.NewPackagesFromStrings(pkg1),
Version: version,
GlobalVersion: globalV2,
OsArch: arch1,
},
key2: Key{
Packages: pkgs.NewPackagesFromStrings(pkg2),
Version: version,
GlobalVersion: globalV2,
OsArch: arch1,
},
wantSame: false,
},
{
name: "Different versions hash to different values",
key1: Key{
Packages: pkgs.NewPackagesFromStrings(pkg1),
Version: version1,
GlobalVersion: globalV2,
OsArch: arch1,
},
key2: Key{
Packages: pkgs.NewPackagesFromStrings(pkg1),
Version: version2,
GlobalVersion: globalV2,
OsArch: arch1,
},
wantSame: false,
},
{
name: "Different global versions hash to different values",
key1: Key{
Packages: pkgs.NewPackagesFromStrings(pkg1),
Version: version1,
GlobalVersion: globalV1,
OsArch: arch1,
},
key2: Key{
Packages: pkgs.NewPackagesFromStrings(pkg1),
Version: version2,
GlobalVersion: globalV2,
OsArch: arch1,
},
wantSame: false,
},
{
name: "Different OS arches hash to different values",
key1: Key{
Packages: pkgs.NewPackagesFromStrings(pkg1),
Version: version1,
GlobalVersion: globalV1,
OsArch: arch1,
},
key2: Key{
Packages: pkgs.NewPackagesFromStrings(pkg1),
Version: version2,
GlobalVersion: globalV2,
OsArch: arch2,
},
wantSame: false,
},
//==============================================================================
// Write Tests
//==============================================================================
func TestKeyWrite_WithValidPaths_WritesPlaintextAndHash(t *testing.T) {
// Arrange
key := createKey(
t,
[]string{package1, package2},
version1,
globalVersion2,
archAmd64,
)
plaintextPath := filepath.Join(t.TempDir(), "key.txt")
ciphertextPath := filepath.Join(t.TempDir(), "key.md5")
// Act
err := key.Write(plaintextPath, ciphertextPath)
// Assert
if err != nil {
t.Fatalf("Write() failed: %v", err)
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
hash1 := c.key1.Hash()
hash2 := c.key2.Hash()
if bytes.Equal(hash1, hash2) != c.wantSame {
t.Errorf("Hash equality = %v, want %v", bytes.Equal(hash1, hash2), c.wantSame)
}
})
// Verify plaintext file
expectedPlaintext := []byte(key.String())
assertFileContentEquals(t, plaintextPath, expectedPlaintext)
// Verify hash file
expectedHash := key.Hash()
assertFileContentEquals(t, ciphertextPath, expectedHash)
}
// TestKeyWrite_WithInvalidPlaintextPath_ReturnsError verifies that Write()
// fails when the plaintext destination directory does not exist.
func TestKeyWrite_WithInvalidPlaintextPath_ReturnsError(t *testing.T) {
	// Arrange
	key := createKey(t, []string{package1}, version1, globalVersion2, archAmd64)
	invalidPath := "/invalid/path/key.txt"
	validPath := filepath.Join(t.TempDir(), "key.md5")

	// Act
	err := key.Write(invalidPath, validPath)

	// Assert
	if err == nil {
		t.Error("Write() should have failed with invalid plaintext path")
	}
}
func TestKey_WriteKeyPlaintext_RoundTripsSameValue(t *testing.T) {
key := Key{
Packages: pkgs.NewPackagesFromStrings(pkg1, pkg2),
Version: version,
GlobalVersion: globalV2,
OsArch: arch1,
func TestKeyWrite_WithInvalidCiphertextPath_ReturnsError(t *testing.T) {
// Arrange
key := createKey(t, []string{package1}, version1, globalVersion2, archAmd64)
validPath := filepath.Join(t.TempDir(), "key.txt")
invalidPath := "/invalid/path/key.md5"
// Act
err := key.Write(validPath, invalidPath)
// Assert
if err == nil {
t.Error("Write() should have failed with invalid ciphertext path")
}
plaintextPath := path.Join(t.TempDir(), "key.txt")
ciphertextPath := path.Join(t.TempDir(), "key.md5")
}
//==============================================================================
// Integration Tests
//==============================================================================
func TestKeyWriteAndRead_PlaintextRoundTrip_PreservesContent(t *testing.T) {
// Arrange
key := createKey(
t,
[]string{package1, package2},
version1,
globalVersion2,
archAmd64,
)
tempDir := t.TempDir()
plaintextPath := filepath.Join(tempDir, "key.txt")
ciphertextPath := filepath.Join(tempDir, "key.md5")
// Act
err := key.Write(plaintextPath, ciphertextPath)
if err != nil {
t.Fatalf("Write() failed: %v", err)
}
plaintextBytes, err := os.ReadFile(plaintextPath)
if err != nil {
t.Fatalf("ReadAll() failed: %v", err)
t.Fatalf("ReadFile() failed: %v", err)
}
plaintext := string(plaintextBytes)
if plaintext != key.PlainText() {
t.Errorf("Round trip failed: got %q, want %q", plaintext, key.PlainText())
// Assert
actualPlaintext := string(plaintextBytes)
expectedPlaintext := key.String()
if actualPlaintext != expectedPlaintext {
t.Errorf(
"Plaintext round trip failed: actual %q, expected %q",
actualPlaintext,
expectedPlaintext,
)
}
}
func TestKey_WriteKeyCiphertext_RoundTripsSameValue(t *testing.T) {
key := Key{
Packages: pkgs.NewPackagesFromStrings(pkg1, pkg2),
Version: version,
GlobalVersion: globalV2,
OsArch: arch1,
}
plaintextPath := path.Join(t.TempDir(), "key.txt")
ciphertextPath := path.Join(t.TempDir(), "key.md5")
func TestKeyWriteAndRead_CiphertextRoundTrip_PreservesHash(t *testing.T) {
// Arrange
key := createKey(
t,
[]string{package1, package2},
version1,
globalVersion2,
archAmd64,
)
tempDir := t.TempDir()
plaintextPath := filepath.Join(tempDir, "key.txt")
ciphertextPath := filepath.Join(tempDir, "key.md5")
// Act
err := key.Write(plaintextPath, ciphertextPath)
if err != nil {
t.Fatalf("Write() failed: %v", err)
}
ciphertextBytes, err := os.ReadFile(ciphertextPath)
if err != nil {
t.Fatalf("ReadAll() failed: %v", err)
t.Fatalf("ReadFile() failed: %v", err)
}
ciphertext := string(ciphertextBytes)
if !bytes.Equal(ciphertextBytes, key.Hash()) {
t.Errorf("Round trip failed: got %q, want %q", ciphertext, key.Hash())
// Assert
expectedHash := key.Hash()
if !bytes.Equal(ciphertextBytes, expectedHash) {
t.Errorf(
"Ciphertext round trip failed: actual %x, expected %x",
ciphertextBytes,
expectedHash,
)
}
}

View file

@ -1,3 +1,32 @@
// manifest.go
//
// Description:
//
// Provides types and functions for managing cache manifests and keys, including serialization,
// deserialization, and validation of package metadata.
//
// Package: cache
//
// Example usage:
//
// // Reading a manifest from file
// manifest, err := cache.Read("/path/to/manifest.json")
// if err != nil {
// log.Fatal(err)
// }
// fmt.Println("Packages:", manifest.InstalledPackages)
//
// // Writing a manifest to file
// err = cache.Write("/path/to/manifest.json", manifest)
// if err != nil {
// log.Fatal(err)
// }
//
// // Writing GitHub outputs
// err = cache.WriteGithubOutputs("/path/to/outputs.txt", manifest)
// if err != nil {
// log.Fatal(err)
// }
package cache
import (

View file

@ -15,40 +15,42 @@ import (
)
const (
manifestVersion = "1.0.0"
manifestGlobalVer = "v2"
manifestArch = "amd64"
manifestFile = "manifest.json"
samplePkgName = "xdot"
samplePkgVersion = "1.3-1"
samplePkgBinPath = "/usr/bin/xdot"
samplePkgDocPath = "/usr/share/doc/xdot"
version = "1.0.0"
globalVer = "20250901"
arch = "amd64"
file = "manifest.json"
pkgName = "xdot"
pkgVersion = "1.3-1"
pkgBinPath = "/usr/bin/xdot"
pkgDocPath = "/usr/share/doc/xdot"
)
var (
fixedTime = time.Date(2025, 8, 28, 10, 0, 0, 0, time.UTC)
emptyPkgs = pkgs.NewPackages()
sampleKey = Key{
Packages: emptyPkgs,
Version: manifestVersion,
GlobalVersion: manifestGlobalVer,
OsArch: manifestArch,
key = createTestKey()
pkg1 = pkgs.Package{
Name: pkgName,
Version: pkgVersion,
}
sampleManifest = &Manifest{
CacheKey: sampleKey,
LastModified: fixedTime,
InstalledPackages: []ManifestPackage{},
pkg2 = pkgs.Package{
Name: "zlib",
Version: "1.1.0",
}
samplePackage = pkgs.Package{
Name: samplePkgName,
Version: samplePkgVersion,
}
sampleFilePaths = []string{samplePkgBinPath, samplePkgDocPath}
filepaths = []string{pkgBinPath, pkgDocPath}
)
func createTestKey() Key {
key, err := NewKey(emptyPkgs, version, globalVer, arch)
if err != nil {
panic("Failed to create test key: " + err.Error())
}
return key
}
func createManifestFile(t *testing.T, dir string, m *Manifest) string {
t.Helper()
path := filepath.Join(dir, manifestFile)
path := filepath.Join(dir, file)
data, err := json.Marshal(m)
if err != nil {
t.Fatalf("Failed to marshal manifest: %v", err)
@ -62,14 +64,14 @@ func createManifestFile(t *testing.T, dir string, m *Manifest) string {
func TestNewManifest_WithEmptyPackages_CreatesValidStructure(t *testing.T) {
// Arrange
expected := &Manifest{
CacheKey: sampleKey,
CacheKey: key,
LastModified: fixedTime,
InstalledPackages: []ManifestPackage{},
}
// Act
actual := &Manifest{
CacheKey: sampleKey,
CacheKey: key,
LastModified: fixedTime,
InstalledPackages: []ManifestPackage{},
}
@ -81,24 +83,24 @@ func TestNewManifest_WithEmptyPackages_CreatesValidStructure(t *testing.T) {
func TestNewManifest_WithSinglePackage_CreatesValidStructure(t *testing.T) {
// Arrange
expected := &Manifest{
CacheKey: sampleKey,
CacheKey: key,
LastModified: fixedTime,
InstalledPackages: []ManifestPackage{
{
Package: samplePackage,
Filepaths: sampleFilePaths,
Package: pkg1,
Filepaths: filepaths,
},
},
}
// Act
actual := &Manifest{
CacheKey: sampleKey,
CacheKey: key,
LastModified: fixedTime,
InstalledPackages: []ManifestPackage{
{
Package: samplePackage,
Filepaths: sampleFilePaths,
Package: pkg1,
Filepaths: filepaths,
},
},
}
@ -113,11 +115,15 @@ func assertManifestEquals(t *testing.T, expected, actual *Manifest) {
if !reflect.DeepEqual(actual.CacheKey, expected.CacheKey) {
t.Errorf("CacheKey = %v, want %v", actual.CacheKey, expected.CacheKey)
}
if !reflect.DeepEqual(actual.LastModified, expected.LastModified) {
if !actual.LastModified.Equal(expected.LastModified) {
t.Errorf("LastModified = %v, want %v", actual.LastModified, expected.LastModified)
}
if !reflect.DeepEqual(actual.InstalledPackages, expected.InstalledPackages) {
t.Errorf("InstalledPackages = %v, want %v", actual.InstalledPackages, expected.InstalledPackages)
t.Errorf(
"InstalledPackages = %v, want %v",
actual.InstalledPackages,
expected.InstalledPackages,
)
}
}
@ -125,12 +131,12 @@ func TestRead_WithValidManifest_ReturnsMatchingStruct(t *testing.T) {
// Arrange
dir := t.TempDir()
expected := &Manifest{
CacheKey: sampleKey,
CacheKey: key,
LastModified: fixedTime,
InstalledPackages: []ManifestPackage{
{
Package: samplePackage,
Filepaths: sampleFilePaths,
Package: pkg1,
Filepaths: filepaths,
},
},
}
@ -138,7 +144,6 @@ func TestRead_WithValidManifest_ReturnsMatchingStruct(t *testing.T) {
// Act
actual, err := Read(path)
// Assert
if err != nil {
t.Fatalf("Read() error = %v", err)
@ -150,10 +155,10 @@ func TestRead_WithNonExistentFile_ReturnsError(t *testing.T) {
// Arrange
dir := t.TempDir()
path := filepath.Join(dir, "nonexistent.json")
// Act
actual, err := Read(path)
// Assert
assertError(t, err, "no such file or directory")
assert.Nil(t, actual)
@ -162,14 +167,14 @@ func TestRead_WithNonExistentFile_ReturnsError(t *testing.T) {
func TestRead_WithInvalidJSON_ReturnsError(t *testing.T) {
// Arrange
dir := t.TempDir()
path := filepath.Join(dir, manifestFile)
path := filepath.Join(dir, file)
if err := os.WriteFile(path, []byte("invalid json"), 0644); err != nil {
t.Fatalf("Failed to write test file: %v", err)
}
// Act
actual, err := Read(path)
// Assert
assertError(t, err, "failed to unmarshal")
assert.Nil(t, actual)
@ -189,8 +194,7 @@ func assertError(t *testing.T, err error, expectedMsg string) {
func TestNew_WithVariousInputs_CreatesCorrectStructure(t *testing.T) {
// Arrange
testTime := time.Now()
testPkgs := pkgs.NewPackagesFromStrings("pkg1=1.0", "pkg2=2.0")
time := time.Now()
tests := []struct {
name string
@ -200,53 +204,44 @@ func TestNew_WithVariousInputs_CreatesCorrectStructure(t *testing.T) {
}{
{
name: "empty manifest with minimum fields",
key: Key{
Packages: pkgs.NewPackages(),
Version: "1.0.0",
GlobalVersion: "v2",
OsArch: "amd64",
},
key: key,
expected: &Manifest{
CacheKey: Key{Packages: pkgs.NewPackages(), Version: "1.0.0", GlobalVersion: "v2", OsArch: "amd64"},
LastModified: testTime,
CacheKey: key,
LastModified: time,
InstalledPackages: []ManifestPackage{},
},
expectError: false,
},
{
name: "manifest with package list",
key: Key{
Packages: testPkgs,
Version: "1.0.0",
GlobalVersion: "v2",
OsArch: "amd64",
},
name: "manifest with package list",
key: key,
expectError: false,
expected: &Manifest{
CacheKey: Key{
Packages: testPkgs,
Version: "1.0.0",
GlobalVersion: "v2",
OsArch: "amd64",
CacheKey: key,
LastModified: time,
InstalledPackages: []ManifestPackage{
{
Package: pkg1,
Filepaths: []string{pkgBinPath, pkgDocPath},
},
{
Package: pkg2,
Filepaths: []string{pkgBinPath, pkgDocPath},
},
},
LastModified: testTime,
InstalledPackages: []ManifestPackage{},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Arrange
manifest := &Manifest{
// Act - create the actual manifest with the expected structure
actual := &Manifest{
CacheKey: tt.key,
LastModified: testTime,
InstalledPackages: []ManifestPackage{},
LastModified: time,
InstalledPackages: tt.expected.InstalledPackages, // Use expected packages
}
// Act
actual := manifest
// Assert
assertManifestEquals(t, tt.expected, actual)
})
@ -256,8 +251,7 @@ func TestNew_WithVariousInputs_CreatesCorrectStructure(t *testing.T) {
func TestRead_WithVariousContents_HandlesAllCases(t *testing.T) {
// Arrange
tmpDir := t.TempDir()
testTime := time.Now()
testPkgs := pkgs.NewPackagesFromStrings("xdot=1.3-1")
time := time.Now()
tests := []struct {
name string
@ -267,13 +261,8 @@ func TestRead_WithVariousContents_HandlesAllCases(t *testing.T) {
{
name: "empty manifest",
input: &Manifest{
CacheKey: Key{
Packages: testPkgs,
Version: "1.0.0",
GlobalVersion: "v2",
OsArch: "amd64",
},
LastModified: testTime,
CacheKey: key,
LastModified: time,
InstalledPackages: []ManifestPackage{},
},
expectError: false,
@ -281,13 +270,8 @@ func TestRead_WithVariousContents_HandlesAllCases(t *testing.T) {
{
name: "manifest with packages",
input: &Manifest{
CacheKey: Key{
Packages: testPkgs,
Version: "1.0.0",
GlobalVersion: "v2",
OsArch: "amd64",
},
LastModified: testTime,
CacheKey: key,
LastModified: time,
InstalledPackages: []ManifestPackage{
{
Package: pkgs.Package{Name: "xdot", Version: "1.3-1"},
@ -304,7 +288,7 @@ func TestRead_WithVariousContents_HandlesAllCases(t *testing.T) {
// Arrange
testDir := filepath.Join(tmpDir, tt.name)
require.NoError(t, os.MkdirAll(testDir, 0755))
path := filepath.Join(testDir, "manifest.json")
data, err := json.Marshal(tt.input)
require.NoError(t, err)

View file

@ -1,4 +1,5 @@
// Package cio provides common I/O operations for the application.
// Package cio provides common I/O operations for the application,
// including JSON serialization, console stream capturing, and file handling.
package cio
import (
@ -6,9 +7,9 @@ import (
"fmt"
)
// FromJSON unmarshals JSON data into a value.
// This is a convenience wrapper around json.Unmarshal that maintains consistent
// JSON handling across the application.
// FromJSON unmarshals JSON data into a value with consistent error handling.
// It wraps json.Unmarshal to provide standardized JSON parsing across the application.
// Returns an error if the JSON data is invalid or cannot be unmarshaled into the target type.
func FromJSON(data []byte, v any) error {
if err := json.Unmarshal(data, v); err != nil {
return fmt.Errorf("failed to unmarshal JSON: %w", err)
@ -16,8 +17,9 @@ func FromJSON(data []byte, v any) error {
return nil
}
// ToJSON marshals a value to a JSON string with consistent indentation.
// The output is always indented with two spaces for readability.
// ToJSON marshals a value to a JSON string with consistent formatting.
// It uses two-space indentation for readability and standardized output.
// Returns the JSON string and any error that occurred during marshaling.
func ToJSON(v any) (string, error) {
content, err := json.MarshalIndent(v, "", " ")
if err != nil {

View file

@ -1,4 +1,5 @@
// Package cio provides common I/O operations for the application.
// Package cio provides common I/O operations for the application,
// including tar archive handling, JSON serialization, and stream capture.
package cio
import (
@ -10,6 +11,8 @@ import (
)
// validateTarInputs performs basic validation of tar archive inputs.
// It checks if the destination path is provided and at least one file is specified.
// Returns an error if the validation fails.
func validateTarInputs(destPath string, files []string) error {
if destPath == "" {
return fmt.Errorf("destination path is required")
@ -20,8 +23,10 @@ func validateTarInputs(destPath string, files []string) error {
return nil
}
// createTarWriter creates a new tar archive writer.
// The caller is responsible for closing both the writer and file.
// createTarWriter creates a new tar archive writer at the specified destination.
// It creates any necessary parent directories and opens the file for writing.
// The caller is responsible for closing both the returned writer and file.
// Returns the tar writer, the underlying file, and any error that occurred.
func createTarWriter(destPath string) (*tar.Writer, *os.File, error) {
// Create parent directories if they don't exist
if err := os.MkdirAll(filepath.Dir(destPath), 0755); err != nil {
@ -38,6 +43,8 @@ func createTarWriter(destPath string) (*tar.Writer, *os.File, error) {
}
// validateFileType checks if the file type is supported for archiving.
// Currently supports regular files and symbolic links.
// Returns an error if the file type is unsupported.
func validateFileType(info os.FileInfo, absPath string) error {
if !info.Mode().IsRegular() && info.Mode()&os.ModeSymlink == 0 {
return fmt.Errorf("unsupported file type for %s", absPath)
@ -89,7 +96,16 @@ func addFileToTar(tw *tar.Writer, absPath string) error {
return nil
}
// CreateTar creates a new tar archive containing the specified files.
// CreateTar creates a new tar archive at destPath containing the specified files.
// It handles both regular files and symbolic links, preserving their paths and attributes.
// Parent directories of destPath will be created if they don't exist.
//
// Parameters:
// - destPath: Path where the tar archive will be created
// - files: List of file paths to include in the archive
//
// Returns an error if the archive creation fails, input validation fails,
// or any file operations fail.
func CreateTar(destPath string, files []string) error {
if err := validateTarInputs(destPath, files); err != nil {
return err

View file

@ -1,76 +1,122 @@
// Package logging provides structured logging functionality for the application.
// Package logging provides enhanced logging functionality for the application.
// It wraps the standard log package with additional features like debug logging,
// file output, and concurrent-safe operations. The package maintains a global
// logger instance with configurable output destinations.
package logging
import (
"io"
"log"
"os"
"path/filepath"
"sync"
"awalsh128.com/cache-apt-pkgs-action/internal/cio"
)
// Logger wraps the standard logger with additional functionality.
// It provides both file and stderr output, with optional debug logging.
type Logger struct {
// wrapped is the underlying standard logger
wrapped *log.Logger
// Filename is the full path to the log file
Filename string
// Debug controls whether debug messages are logged
Debug bool
// file is the log file handle
file *os.File
// loggerWrapper encapsulates a standard logger with additional functionality.
type loggerWrapper struct {
wrapped *log.Logger // The underlying standard logger
}
// Global logger instance used by package-level functions
var logger *Logger
// DebugEnabled controls whether debug messages are logged.
// When true, Debug() calls will output messages; when false, they are ignored.
var DebugEnabled = false
// LogFilepath is the path where log files will be created
var LogFilepath = os.Args[0] + ".log"
var loggerMu sync.Mutex // Protects logger operations
var logger = createDefault()
// Init creates and initializes a new logger.
// It sets up logging to both a file and stderr, and enables debug logging if requested.
// The existing log file is removed to start fresh.
func Init(filename string, debug bool) *Logger {
os.Remove(LogFilepath)
file, err := os.OpenFile(LogFilepath, os.O_CREATE|os.O_WRONLY, 0644)
// create instantiates a new logger that writes to all of the given writers.
// The writers are combined via io.MultiWriter, so every log line is
// duplicated to each destination. loggerMu is held while the logger is
// constructed so concurrent initializations do not interleave.
func create(writers ...io.Writer) loggerWrapper {
	loggerMu.Lock()
	defer loggerMu.Unlock()
	return loggerWrapper{
		// Standard timestamp flags (date + time) with no prefix.
		wrapped: log.New(io.MultiWriter(writers...), "", log.LstdFlags),
	}
}
// createDefault builds the package's default logger, which writes only to
// stderr. It is used to initialize the package-level logger and by
// InitDefault to reset it.
func createDefault() loggerWrapper {
	return create(os.Stderr)
}
// SetOutput overrides the output destination of the current logger.
// This affects all subsequent log messages from this package.
// NOTE(review): log.Logger.SetOutput is internally synchronized, but the
// package-level `logger` variable itself is replaced by Init/InitDefault
// without holding loggerMu, so calling SetOutput concurrently with an
// Init* function is racy — confirm intended usage.
func SetOutput(writer io.Writer) {
	logger.wrapped.SetOutput(writer)
}
// recreateFileWriter creates a fresh log file next to the running binary
// (os.Args[0] + ".log"), removing any previous log file first, and returns
// the open write-only handle. The caller owns the returned handle and is
// responsible for closing it.
// Exits the process via log.Fatal if the file cannot be created.
func recreateFileWriter() *os.File {
	logFilepath := os.Args[0] + ".log"
	// Ignore error if file doesn't exist
	_ = os.Remove(logFilepath)
	file, err := os.OpenFile(logFilepath, os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		log.Fatal(err)
	}
	return file
}
logger = &Logger{
// Logs to both stderr and file.
// Stderr is used to act as a sidechannel of information and stay separate from the actual outputs of the program.
wrapped: log.New(io.MultiWriter(file, os.Stderr), "", log.LstdFlags),
Filename: filepath.Join(cwd, file.Name()),
Debug: debug,
file: file,
}
Debug("Debug log created at %s", logger.Filename)
return logger
// InitDefault resets the logger to its default state, writing only to stderr.
// Any existing log files or custom writers are discarded.
func InitDefault() {
DebugEnabled = false
logger = createDefault()
}
// Init initializes a new logger that writes to both a file and stderr.
// The log file is named after the binary with a .log extension.
// Previous log file content is discarded.
//
// Parameters:
// - debug: Enable or disable debug logging
func Init(debug bool) {
file := recreateFileWriter()
DebugEnabled = debug
logger = create(file, os.Stderr)
}
// InitWithWriter initializes a new logger with custom output writers.
// Writes to both a log file and the specified writer.
//
// Parameters:
// - debug: Enable or disable debug logging
// - writer: Additional output destination besides the log file
func InitWithWriter(debug bool, writer io.Writer) {
file := recreateFileWriter()
DebugEnabled = debug
logger = create(file, writer)
}
// DebugLazy logs a debug message using a lazy evaluation function.
// The message generator function is only called if debug logging is enabled,
// making it efficient for expensive debug message creation.
//
// The getLine function should return the message to be logged.
func DebugLazy(getLine func() string) {
if logger.Debug {
if DebugEnabled {
logger.wrapped.Println(getLine())
}
}
// Debug logs a formatted debug message if debug logging is enabled.
// Uses fmt.Printf style formatting.
// Uses fmt.Printf style formatting. No-op if debug is disabled.
//
// Parameters:
// - format: Printf-style format string
// - a: Arguments for the format string
func Debug(format string, a ...any) {
if logger.Debug {
if DebugEnabled {
logger.wrapped.Printf(format, a...)
}
}
// DumpVars logs the JSON representation of variables if debug is enabled.
// Each variable is converted to JSON format before logging.
// Continues to next variable if one fails to convert.
func DumpVars(a ...any) {
if logger.Debug {
if DebugEnabled {
for _, v := range a {
json, err := cio.ToJSON(v)
if err != nil {
@ -82,10 +128,15 @@ func DumpVars(a ...any) {
}
}
// Info logs a formatted message at info level.
// Always logs regardless of debug setting.
// Adds a newline to the end of the message.
//
// Parameters:
//   - format: Printf-style format string
//   - a: Arguments for the format string
func Info(format string, a ...any) {
	logger.wrapped.Printf(format+"\n", a...)
}
// Fatal logs the given error and terminates the program.
// Delegates to log.Logger.Fatal, which prints the error and then calls
// os.Exit(1); deferred functions will not run.
func Fatal(err error) {
	logger.wrapped.Fatal(err)
}

View file

@ -2,68 +2,64 @@ package logging
import (
"bytes"
"log"
"os"
"regexp"
"testing"
)
func TestDebug(t *testing.T) {
// Capture log output
var buf bytes.Buffer
log.SetOutput(&buf)
defer log.SetOutput(os.Stderr)
tests := []struct {
name string
message string
args []interface{}
enabled bool
wantLog bool
name string
message string
args []any
enabled bool
expectedLogged bool
}{
{
name: "Debug enabled",
message: "test message",
args: []interface{}{},
enabled: true,
wantLog: true,
name: "Debug enabled",
message: "test message",
args: []any{},
enabled: true,
expectedLogged: true,
},
{
name: "Debug disabled",
message: "test message",
args: []interface{}{},
enabled: false,
wantLog: false,
name: "Debug disabled",
message: "test message",
args: []any{},
enabled: false,
expectedLogged: false,
},
{
name: "Debug with formatting",
message: "test %s %d",
args: []interface{}{"message", 42},
enabled: true,
wantLog: true,
name: "Debug with formatting",
message: "test %s %d",
args: []any{"message", 42},
enabled: true,
expectedLogged: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
buf.Reset()
SetDebug(tt.enabled)
var buf bytes.Buffer
SetOutput(&buf)
defer InitDefault()
// Set the debug enabled state for this test
originalEnabled := DebugEnabled
DebugEnabled = tt.enabled
defer func() { DebugEnabled = originalEnabled }()
Debug(tt.message, tt.args...)
hasOutput := buf.Len() > 0
if hasOutput != tt.wantLog {
t.Errorf("Debug() logged = %v, want %v", hasOutput, tt.wantLog)
if hasOutput != tt.expectedLogged {
t.Errorf("Debug() logged = %v, expected %v", hasOutput, tt.expectedLogged)
}
})
}
}
func TestDebugLazy(t *testing.T) {
// Capture log output
var buf bytes.Buffer
log.SetOutput(&buf)
defer log.SetOutput(os.Stderr)
var evaluated bool
messageFunc := func() string {
evaluated = true
@ -71,73 +67,72 @@ func TestDebugLazy(t *testing.T) {
}
tests := []struct {
name string
messageFunc func() string
enabled bool
wantLog bool
wantEvaluate bool
name string
messageFunc func() string
enabled bool
expectedLogged bool
expectedEvaluate bool
}{
{
name: "DebugLazy enabled",
messageFunc: messageFunc,
enabled: true,
wantLog: true,
wantEvaluate: true,
name: "DebugLazy enabled",
messageFunc: messageFunc,
enabled: true,
expectedLogged: true,
expectedEvaluate: true,
},
{
name: "DebugLazy disabled",
messageFunc: messageFunc,
enabled: false,
wantLog: false,
wantEvaluate: false,
name: "DebugLazy disabled",
messageFunc: messageFunc,
enabled: false,
expectedLogged: false,
expectedEvaluate: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
buf.Reset()
var buf bytes.Buffer
SetOutput(&buf)
defer InitDefault()
evaluated = false
SetDebug(tt.enabled)
DebugEnabled = tt.enabled
DebugLazy(tt.messageFunc)
hasOutput := buf.Len() > 0
if hasOutput != tt.wantLog {
t.Errorf("DebugLazy() logged = %v, want %v", hasOutput, tt.wantLog)
if hasOutput != tt.expectedLogged {
t.Errorf("DebugLazy() logged = %v, expected %v", hasOutput, tt.expectedLogged)
}
if evaluated != tt.wantEvaluate {
t.Errorf("DebugLazy() evaluated = %v, want %v", evaluated, tt.wantEvaluate)
if evaluated != tt.expectedEvaluate {
t.Errorf("DebugLazy() evaluated = %v, expected %v", evaluated, tt.expectedEvaluate)
}
})
}
}
func TestInfo(t *testing.T) {
// Capture log output
var buf bytes.Buffer
log.SetOutput(&buf)
defer log.SetOutput(os.Stderr)
tests := []struct {
name string
message string
args []interface{}
args []any
}{
{
name: "Simple message",
message: "test message",
args: []interface{}{},
args: []any{},
},
{
name: "Formatted message",
message: "test %s %d",
args: []interface{}{"message", 42},
args: []any{"message", 42},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
buf.Reset()
var buf bytes.Buffer
SetOutput(&buf)
defer InitDefault()
Info(tt.message, tt.args...)
@ -147,3 +142,49 @@ func TestInfo(t *testing.T) {
})
}
}
// TestInit verifies that Init wires logging to stderr: after Init, an Info
// message must appear on (the replaced) stderr, prefixed with the standard
// log timestamp.
func TestInit(t *testing.T) {
	// Save original stderr and cleanup function
	origStderr := os.Stderr
	defer func() {
		os.Stderr = origStderr
	}()

	// Set to base state before test setup since logger is static
	InitDefault()

	// Replace stderr with the write end of a pipe so the logger's stderr
	// output can be read back and inspected.
	r, w, err := os.Pipe()
	if err != nil {
		t.Fatalf("Failed to create pipe: %v", err)
	}
	os.Stderr = w

	// Arrange — Init must run after the stderr swap so it captures the pipe.
	Init(false)
	message := "test message after Init"

	// Act
	Info(message)

	// Close write end of pipe so the reader below sees EOF.
	if err := w.Close(); err != nil {
		t.Errorf("Failed to close pipe writer: %v", err)
	}

	// Read the output
	var buf bytes.Buffer
	if _, err := buf.ReadFrom(r); err != nil {
		t.Errorf("Failed to read from pipe: %v", err)
	}
	if err := r.Close(); err != nil {
		t.Errorf("Failed to close pipe reader: %v", err)
	}

	// Assert
	// Check that the output is exactly one line: timestamp + message.
	actual := buf.String()
	matched := regexp.MustCompile(`^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} test message after Init\n$`).
		MatchString(actual)
	if !matched {
		t.Errorf("Expected output to regex match %q, but got %q", message, actual)
	}
}

View file

@ -70,6 +70,9 @@ func (a *Apt) ListInstalledFiles(pkg *Package) ([]string, error) {
return files, nil
}
// Validate checks if a package exists and is available for installation.
// It returns package information including version, size, and dependencies.
// Returns an error if the package is not found or cannot be queried.
func (a *Apt) Validate(pkg *Package) (manager.PackageInfo, error) {
packageInfo, err := a.manager.GetPackageInfo(pkg.String(), &manager.Options{AssumeYes: true})
if err != nil {

View file

@ -18,19 +18,19 @@ func TestApt_Install(t *testing.T) {
// Note: These tests require a real system and apt to be available
// They should be run in a controlled environment like a Docker container
tests := []struct {
name string
pkgs []string
wantErr bool
name string
pkgs []Package
expectedErr bool
}{
{
name: "Empty package list",
pkgs: []string{},
wantErr: false,
name: "Empty package list",
pkgs: []Package{},
expectedErr: false,
},
{
name: "Invalid package",
pkgs: []string{"nonexistent-package-12345"},
wantErr: true,
name: "Invalid package",
pkgs: []Package{{Name: "nonexistent-package-12345"}},
expectedErr: true,
},
}
@ -41,49 +41,24 @@ func TestApt_Install(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
packages := NewPackages()
for _, pkg := range tt.pkgs {
packages.Add(pkg)
}
packages := NewPackages(tt.pkgs...)
_, err := apt.Install(packages)
if (err != nil) != tt.wantErr {
t.Errorf("Apt.Install() error = %v, wantErr %v", err, tt.wantErr)
if (err != nil) != tt.expectedErr {
t.Errorf("Apt.Install() error = %v, expectedErr %v", err, tt.expectedErr)
}
})
}
}
func TestApt_ListInstalledFiles(t *testing.T) {
func TestApt_ListInstalledFiles_NonExistentPackage_ReturnsError(t *testing.T) {
// Note: These tests require a real system and apt to be available
apt, err := NewApt()
if err != nil {
t.Fatalf("Failed to create Apt instance: %v", err)
}
tests := []struct {
name string
pkg string
want []string
wantErr bool
}{
{
name: "Invalid package",
pkg: "nonexistent-package-12345",
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := apt.ListInstalledFiles(tt.pkg)
if (err != nil) != tt.wantErr {
t.Errorf("Apt.ListInstalledFiles() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !tt.wantErr && len(got) == 0 {
t.Error("Apt.ListInstalledFiles() returned empty list for valid package")
}
})
_, err = apt.ListInstalledFiles(&Package{Name: "nonexistent-package-12345"})
if err == nil {
t.Errorf("Apt.ListInstalledFiles() expected error, but got nil")
return
}
}

View file

@ -1,3 +1,4 @@
// Package pkgs provides package management functionality using APT.
package pkgs
import (

View file

@ -58,6 +58,15 @@ func (p *packages) String() string {
return strings.Join(parts, " ")
}
// NewPackagesFromSyspkg creates a new Packages collection from system package information.
// Converts system-specific package information into the internal Package format,
// preserving name and version information.
//
// Parameters:
// - pkgs: Array of system package information structures
//
// Returns:
// - Packages: A new ordered collection of the converted packages
func NewPackagesFromSyspkg(pkgs []manager.PackageInfo) Packages {
items := packages{}
for _, pkg := range pkgs {
@ -66,6 +75,15 @@ func NewPackagesFromSyspkg(pkgs []manager.PackageInfo) Packages {
return NewPackages(items...)
}
// NewPackagesFromStrings creates a new Packages collection from package specification strings.
// Each string should be in the format "name" or "name=version".
// Fatally exits if any package string is invalid.
//
// Parameters:
// - pkgs: Variable number of package specification strings
//
// Returns:
// - Packages: A new ordered collection of the parsed packages
func NewPackagesFromStrings(pkgs ...string) Packages {
items := packages{}
for _, pkgStr := range pkgs {
@ -78,10 +96,28 @@ func NewPackagesFromStrings(pkgs ...string) Packages {
return NewPackages(items...)
}
// NewPackages creates a new Packages collection from Package instances.
// Maintains a stable order by sorting packages by name and version.
// Automatically deduplicates packages with identical name and version.
//
// Parameters:
// - pkgs: Variable number of Package instances
//
// Returns:
// - Packages: A new ordered collection of unique packages
func NewPackages(pkgs ...Package) Packages {
// Create a new slice to avoid modifying the input
result := make(packages, len(pkgs))
copy(result, pkgs)
result := make(packages, 0, len(pkgs))
// Add packages, avoiding duplicates
seenPkgs := make(map[string]bool)
for _, pkg := range pkgs {
key := pkg.Name + "=" + pkg.Version
if !seenPkgs[key] {
seenPkgs[key] = true
result = append(result, pkg)
}
}
// Sort packages by name and version
slices.SortFunc(result, func(lhs, rhs Package) int {
@ -103,7 +139,16 @@ func NewPackages(pkgs ...Package) Packages {
return &result
}
// ParsePackageArgs parses package arguments and returns a new Packages instance
// ParsePackageArgs parses package arguments into a Packages collection.
// Each argument should be a package specification in the format "name" or "name=version".
// Invalid package specifications will cause an error to be returned.
//
// Parameters:
// - value: Array of package specification strings to parse
//
// Returns:
// - Packages: A new ordered collection of the parsed packages
// - error: Any error encountered while parsing package specifications
func ParsePackageArgs(value []string) (Packages, error) {
var pkgs packages
for _, val := range value {

View file

@ -4,46 +4,59 @@ import (
"testing"
)
func TestNewPackages(t *testing.T) {
p := NewPackages()
if p == nil {
// Test constants - meaningful names without "test" prefix.
// Each is a package spec in "name=version" form; the names are chosen so
// their name-sorted order (apt, rolldice, zlib) differs from declaration
// order, exercising the sorting behavior of NewPackages.
const (
	package1 = "zlib=1.2.3"
	package2 = "rolldice=1.16-1build3"
	package3 = "apt=2.0.0"
)
// TestPackagesNewPackages_referenceNil_executesFail verifies that
// NewPackages never returns a nil collection, even with no arguments.
func TestPackagesNewPackages_referenceNil_executesFail(t *testing.T) {
	if NewPackages() == nil {
		t.Fatal("NewPackages() returned nil")
	}
}
func TestNewPackagesFromStrings(t *testing.T) {
// TestPackagesNewPackages_containsPackages_executesFail verifies that an
// argument-less NewPackages yields an empty collection.
func TestPackagesNewPackages_containsPackages_executesFail(t *testing.T) {
	// Call Len() once and reuse the result instead of re-building the
	// collection for the error message.
	if n := NewPackages().Len(); n != 0 {
		t.Errorf("NewPackages() returned non-empty Packages, actual length %d", n)
	}
}
func TestPackagesNewPackagesFromStrings(t *testing.T) {
tests := []struct {
name string
pkgs []string
wantLen int
wantOrdered []string // expected order after sorting
name string
pkgs []string
expectedLen int
expectedOrder []string // expected order after sorting
}{
{
name: "Empty input",
pkgs: []string{},
wantLen: 0,
wantOrdered: []string{},
name: "Empty input",
pkgs: []string{},
expectedLen: 0,
expectedOrder: []string{},
},
{
name: "Single package",
pkgs: []string{"xdot=1.3-1"},
wantLen: 1,
wantOrdered: []string{"xdot=1.3-1"},
name: "Single package",
pkgs: []string{package1},
expectedLen: 1,
expectedOrder: []string{package1},
},
{
name: "Multiple packages unsorted",
pkgs: []string{"zlib=1.2.3", "xdot=1.3-1", "apt=2.0.0"},
wantLen: 3,
wantOrdered: []string{"apt=2.0.0", "xdot=1.3-1", "zlib=1.2.3"},
pkgs: []string{package2, package1, package3}, // rolldice, zlib, apt
expectedLen: 3,
expectedOrder: []string{
package3,
package2,
package1,
}, // apt, rolldice, zlib (sorted by name)
},
{
name: "Duplicate packages",
pkgs: []string{"xdot=1.3-1", "xdot=1.3-1", "apt=2.0.0"},
wantLen: 2,
wantOrdered: []string{"apt=2.0.0", "xdot=1.3-1"},
name: "Duplicate packages",
pkgs: []string{package1, package1, package3},
expectedLen: 2,
expectedOrder: []string{package3, package1},
},
}
@ -51,14 +64,14 @@ func TestNewPackagesFromStrings(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
p := NewPackagesFromStrings(tt.pkgs...)
// Test Len()
if got := p.Len(); got != tt.wantLen {
t.Errorf("Len() = %v, want %v", got, tt.wantLen)
// TestPackages Len()
if actual := p.Len(); actual != tt.expectedLen {
t.Errorf("Len() = %v, expected %v", actual, tt.expectedLen)
}
// Test Get() and verify order
// TestPackages Get() and verify order
for i := 0; i < p.Len(); i++ {
if i >= len(tt.wantOrdered) {
if i >= len(tt.expectedOrder) {
t.Errorf(
"Too many packages in result, extra package at index %d: %s",
i,
@ -66,149 +79,40 @@ func TestNewPackagesFromStrings(t *testing.T) {
)
continue
}
if got := p.Get(i); got != tt.wantOrdered[i] {
t.Errorf("Get(%d) = %v, want %v", i, got, tt.wantOrdered[i])
if actual := p.Get(i); actual.String() != tt.expectedOrder[i] {
t.Errorf("Get(%d) = %v, expected %v", i, actual, tt.expectedOrder[i])
}
}
// Test String()
wantString := ""
if len(tt.wantOrdered) > 0 {
for i, pkg := range tt.wantOrdered {
// TestPackages String()
expectedString := ""
if len(tt.expectedOrder) > 0 {
for i, pkg := range tt.expectedOrder {
if i > 0 {
wantString += ","
expectedString += " " // Use space separator to match implementation
}
wantString += pkg
expectedString += pkg
}
}
if got := p.String(); got != wantString {
t.Errorf("String() = %v, want %v", got, wantString)
if actual := p.String(); actual != expectedString {
t.Errorf("String() = %v, expected %v", actual, expectedString)
}
// Test StringArray()
gotArray := p.StringArray()
if len(gotArray) != len(tt.wantOrdered) {
t.Errorf("StringArray() length = %v, want %v", len(gotArray), len(tt.wantOrdered))
// TestPackages StringArray()
actualArray := p.StringArray()
if len(actualArray) != len(tt.expectedOrder) {
t.Errorf(
"StringArray() length = %v, expected %v",
len(actualArray),
len(tt.expectedOrder),
)
} else {
for i, want := range tt.wantOrdered {
if gotArray[i] != want {
t.Errorf("StringArray()[%d] = %v, want %v", i, gotArray[i], want)
for i, expected := range tt.expectedOrder {
if actualArray[i] != expected {
t.Errorf("StringArray()[%d] = %v, expected %v", i, actualArray[i], expected)
}
}
}
})
}
}
func TestPackages_Add(t *testing.T) {
tests := []struct {
name string
initial []string
toAdd []string
wantOrdered []string
}{
{
name: "Add to empty",
initial: []string{},
toAdd: []string{"xdot=1.3-1"},
wantOrdered: []string{"xdot=1.3-1"},
},
{
name: "Add multiple maintaining order",
initial: []string{"apt=2.0.0"},
toAdd: []string{"zlib=1.2.3", "xdot=1.3-1"},
wantOrdered: []string{"apt=2.0.0", "xdot=1.3-1", "zlib=1.2.3"},
},
{
name: "Add duplicate",
initial: []string{"xdot=1.3-1"},
toAdd: []string{"xdot=1.3-1"},
wantOrdered: []string{"xdot=1.3-1"},
},
{
name: "Add same package different version",
initial: []string{"xdot=1.3-1"},
toAdd: []string{"xdot=1.3-2"},
wantOrdered: []string{"xdot=1.3-1", "xdot=1.3-2"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := NewPackagesFromStrings(tt.initial...)
// Add packages one by one to test Add method
for _, pkg := range tt.toAdd {
p.Add(pkg)
}
// Verify length
if got := p.Len(); got != len(tt.wantOrdered) {
t.Errorf("After Add(), Len() = %v, want %v", got, len(tt.wantOrdered))
}
// Verify order using Get
for i := 0; i < p.Len(); i++ {
if got := p.Get(i); got != tt.wantOrdered[i] {
t.Errorf("After Add(), Get(%d) = %v, want %v", i, got, tt.wantOrdered[i])
}
}
// Verify Contains for all added packages
for _, pkg := range tt.toAdd {
if !p.Contains(pkg) {
t.Errorf("After Add(), Contains(%v) = false, want true", pkg)
}
}
})
}
}
func TestPackages_Contains(t *testing.T) {
tests := []struct {
name string
packages []string
check string
want bool
}{
{
name: "Empty packages",
packages: []string{},
check: "xdot=1.3-1",
want: false,
},
{
name: "Package exists",
packages: []string{"apt=2.0.0", "xdot=1.3-1"},
check: "xdot=1.3-1",
want: true,
},
{
name: "Package exists (different order)",
packages: []string{"xdot=1.3-1", "apt=2.0.0"},
check: "apt=2.0.0",
want: true,
},
{
name: "Package doesn't exist",
packages: []string{"xdot=1.3-1", "apt=2.0.0"},
check: "nonexistent=1.0",
want: false,
},
{
name: "Similar package different version",
packages: []string{"xdot=1.3-1"},
check: "xdot=1.3-2",
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
p := NewPackagesFromStrings(tt.packages...)
if got := p.Contains(tt.check); got != tt.want {
t.Errorf("Contains(%v) = %v, want %v", tt.check, got, tt.want)
}
})
}
}

View file

@ -0,0 +1,43 @@
// Package testing provides utilities for testing, including capturing standard output and error.
package testing
import (
"io"
"os"
)
// CaptureStd captures stdout and stderr output during the execution of a function.
// It temporarily redirects the standard streams, executes the provided function,
// and returns the captured output as strings. The original streams are restored
// after execution, even if the function panics.
//
// Example:
//
// stdout, stderr := CaptureStd(func() {
// fmt.Println("captured")
// fmt.Fprintf(os.Stderr, "error")
// })
func CaptureStd(fn func()) (stdout, stderr string) {
oldStdout := os.Stdout
oldStderr := os.Stderr
rOut, wOut, _ := os.Pipe()
rErr, wErr, _ := os.Pipe()
os.Stdout = wOut
os.Stderr = wErr
done := make(chan struct{})
go func() {
bufOut, _ := io.ReadAll(rOut)
bufErr, _ := io.ReadAll(rErr)
stdout, stderr = string(bufOut), string(bufErr)
close(done)
}()
fn()
wOut.Close()
wErr.Close()
os.Stdout = oldStdout
os.Stderr = oldStderr
<-done
return
}

View file

@ -0,0 +1,44 @@
// Package testing provides precondition checks to ensure invariants in code are met.
package testing
import "fmt"
// FieldValue pairs a field's name with its value so precondition failures
// can report which argument was empty.
type FieldValue struct {
	name  string // identifier reported in error messages
	value any    // value to be checked for emptiness
}
// isEmpty reports whether v holds its type's zero/empty value.
//
// Supported inputs: nil, *any, chan any, func(), []any, map[any]any, the
// signed integer types plus byte and uintptr, complex64/128, error,
// float32/64, string, and struct{}. Panics with the value's type for
// anything else so misuse fails loudly.
//
// Each concrete type gets its own case: in a multi-type switch case the
// value keeps its static `any` type, so `v == 0` would only match an
// int-typed zero (the original bug this fixes).
func isEmpty(v any) bool {
	switch v := v.(type) {
	case nil:
		return true
	case *any:
		return v == nil
	case chan any:
		return v == nil
	case func():
		return v == nil
	case []any:
		return len(v) == 0
	case map[any]any:
		return len(v) == 0
	case byte: // alias for uint8
		return v == 0
	case int:
		return v == 0
	case int8:
		return v == 0
	case int16:
		return v == 0
	case int32:
		return v == 0
	case int64:
		return v == 0
	case uintptr:
		return v == 0
	case complex64:
		return v == 0
	case complex128:
		return v == 0
	case error:
		return v == nil
	case float32:
		return v == 0
	case float64:
		return v == 0
	case string:
		return v == ""
	case struct{}:
		return true
	default:
		panic(fmt.Sprintf("unsupported type: %T", v))
	}
}
// RequireNonEmpty validates that none of the supplied fields hold an empty
// value, returning an error that names the first empty field encountered,
// or nil when every field is populated.
func RequireNonEmpty(args ...FieldValue) error {
	for _, field := range args {
		if !isEmpty(field.value) {
			continue
		}
		return fmt.Errorf("argument %v is empty", field.name)
	}
	return nil
}

View file

@ -1,83 +0,0 @@
#!/bin/bash
#==============================================================================
# check_utf8.sh
#==============================================================================
#
# DESCRIPTION:
#   Script to check and validate UTF-8 encoding in text files.
#   Identifies files that are not properly UTF-8 encoded and converts them
#   to UTF-8 in place when possible. Skips binary files and common
#   non-text file types.
#
# USAGE:
#   ./scripts/check_utf8.sh [<file>...] [directory]
#
# OPTIONS:
#   <file>        One or more files to check
#   <directory>   A directory to scan for files
#
# DEPENDENCIES:
#   - bash
#   - file (for file type detection)
#   - iconv (for encoding conversion)
#==============================================================================

# Required tools
command -v file >/dev/null 2>&1 || {
  echo "file command not found. Please install it."
  exit 1
}
command -v iconv >/dev/null 2>&1 || {
  echo "iconv command not found. Please install it."
  exit 1
}

# Honor the documented arguments: previously they were silently ignored and
# the script always scanned ".". Files and/or directories may be passed;
# with no arguments the current directory is scanned.
scan_roots=("$@")
[[ ${#scan_roots[@]} -eq 0 ]] && scan_roots=(".")

# Find all potential text files, excluding certain directories and files.
# The .git exclusion uses "*/.git/*" so it also applies when a root other
# than "." is given (the old "./.git/*" only matched under ".").
find "${scan_roots[@]}" -type f \
  ! -path "*/.git/*" \
  ! -name "*.png" \
  ! -name "*.jpg" \
  ! -name "*.jpeg" \
  ! -name "*.gif" \
  ! -name "*.ico" \
  ! -name "*.bin" \
  ! -name "*.exe" \
  ! -name "*.dll" \
  ! -name "*.so" \
  ! -name "*.dylib" \
  -exec file -i {} \; |
  while read -r line; do
    file_path=$(echo "$line" | cut -d: -f1)
    mime_type=$(echo "$line" | cut -d: -f2)

    # Skip non-text files. NOTE(review): this && chain only skips when the
    # file is neither text nor JSON/YAML AND file(1) reported "binary";
    # preserved as-is to avoid changing which files are processed.
    if [[ ! $mime_type =~ "text/" ]] &&
      [[ ! $mime_type =~ "application/json" ]] &&
      [[ ! $mime_type =~ "application/x-yaml" ]] &&
      [[ $line == *"binary"* ]]; then
      echo "⏭️ Skipping non-text file: $file_path ($mime_type)"
      continue
    fi

    # Pull the charset out of file(1)'s "...; charset=<enc>" suffix.
    encoding=$(echo "$mime_type" | grep -oP "charset=\K[^ ]*" || echo "unknown")

    # Skip if already UTF-8 or ASCII
    if [[ $encoding == "utf-8" ]] || [[ $encoding == "us-ascii" ]]; then
      echo "$file_path is already UTF-8"
      continue
    fi

    echo "⚠️ Converting $file_path from $encoding to UTF-8"

    # Create a temporary file for conversion
    temp_file="${file_path}.tmp"

    # Try to convert the file to UTF-8; fall back to ISO-8859-1 when the
    # detected encoding is empty.
    if iconv -f "${encoding:-ISO-8859-1}" -t UTF-8 "$file_path" >"$temp_file" 2>/dev/null; then
      mv "$temp_file" "$file_path"
      echo "✓ Successfully converted $file_path to UTF-8"
    else
      rm -f "$temp_file"
      echo "⚠️ File $file_path appears to be binary or already UTF-8"
    fi
  done

View file

@ -3,13 +3,13 @@
#==============================================================================
# distribute.sh
#==============================================================================
#
#
# DESCRIPTION:
# Manages distribution of compiled binaries for different architectures.
# Handles building, pushing, and retrieving binary paths for GitHub Actions.
#
# USAGE:
# ./scripts/distribute.sh <command> [architecture]
# ./scripts/distribute.sh [OPTIONS] <command> [architecture]
#
# COMMANDS:
# push - Build and push all architecture binaries to dist directory
@ -18,102 +18,92 @@
# ARCHITECTURES:
# X86, X64, ARM, ARM64 - GitHub runner architectures
#
# DEPENDENCIES:
# - bash
# - go (for building)
# - git
# OPTIONS:
# -v, --verbose Enable verbose output
# -h, --help Show this help message
#==============================================================================
set -e
source "$(git rev-parse --show-toplevel)/scripts/lib.sh"
parse_common_args "$@" >/dev/null # prevent return from echo'ng
CMD="$1"
RUNNER_ARCH="$2"
BUILD_DIR="../dist"
BUILD_DIR="${PROJECT_ROOT}/dist"
# GitHub runner.arch values to GOARCH values
# https://github.com/github/docs/blob/main/data/reusables/actions/runner-arch-description.md
# https://github.com/golang/go/blob/master/src/internal/syslist/syslist.go
declare -A rarch_to_goarch=(
["X86"]="386"
["X64"]="amd64"
["ARM"]="arm"
["ARM64"]="arm64"
["X86"]="386"
["X64"]="amd64"
["ARM"]="arm"
["ARM64"]="arm64"
)
function usage() {
echo "error: $1" >&2
echo -e "
Usage: $0 <command>
Commands:
push - Build and push all architecture binaries to dist directory.
getbinpath [X86, X64, ARM, ARM64] - Get the binary path from dist directory." >&2
exit 1
}
function push() {
rm -fr "$BUILD_DIR"
mkdir -p "$BUILD_DIR"
rm -fr "${BUILD_DIR}"
mkdir -p "${BUILD_DIR}"
# Package name
PACKAGE_NAME="cache-apt-pkgs"
# Package name
PACKAGE_NAME="cache-apt-pkgs"
# Print the build plan
echo "Building for these architectures:"
for arch in "${!rarch_to_goarch[@]}"; do
echo " - Linux/$arch (GOARCH=${rarch_to_goarch[$arch]})"
done
echo
# Print the build plan
echo "Building for these architectures:"
for arch in "${!rarch_to_goarch[@]}"; do
echo " - Linux/${arch} (GOARCH=${rarch_to_goarch[${arch}]})"
done
echo
# Build for each architecture
local binary_name
for runner_arch in "${!rarch_to_goarch[@]}"; do
go_arch="${rarch_to_goarch[$runner_arch]}"
binary_name="$BUILD_DIR/$PACKAGE_NAME-linux-$go_arch"
# Build for each architecture
local binary_name
for runner_arch in "${!rarch_to_goarch[@]}"; do
go_arch="${rarch_to_goarch[${runner_arch}]}"
binary_name="${BUILD_DIR}/${PACKAGE_NAME}-linux-${go_arch}"
echo "Building $binary_name for Linux/$runner_arch (GOARCH=$go_arch)..."
echo "Building ${binary_name} for Linux/${runner_arch} (GOARCH=${go_arch})..."
# Build the binary
GOOS=linux GOARCH=$go_arch go build -v \
-o "$binary_name" \
../src/cmd/cache_apt_pkgs
# Build the binary
GOOS=linux GOARCH=${go_arch} go build -v \
-o "${binary_name}" \
"${PROJECT_ROOT}/cmd/cache_apt_pkgs"
echo "✓ Built $PACKAGE_NAME-linux-$go_arch"
done
echo "✓ Build ${PACKAGE_NAME}-linux-${go_arch}"
done
echo "All builds completed!"
echo "All builds completed!"
}
function getbinpath() {
local runner_arch=$1
local runner_arch=$1
if [[ -z $runner_arch ]]; then
usage "runner architecture not provided"
fi
if [[ -z ${runner_arch} ]]; then
fail "runner architecture not provided"
fi
local go_arch="${rarch_to_goarch[$runner_arch]}"
if [[ -z $go_arch ]]; then
usage "invalid runner architecture: $runner_arch"
fi
local go_arch="${rarch_to_goarch[${runner_arch}]}"
if [[ -z ${go_arch} ]]; then
fail "invalid runner architecture: ${runner_arch}"
fi
local binary_name="$BUILD_DIR/cache-apt-pkgs-linux-$go_arch"
if [[ ! -f $binary_name ]]; then
usage "binary not found: $binary_name (did you run 'push' first?)"
fi
local binary_name="${BUILD_DIR}/cache-apt-pkgs-linux-${go_arch}"
if [[ ! -f ${binary_name} ]]; then
fail "binary not found: ${binary_name} (did you run 'push' first?)"
fi
echo "$binary_name"
echo "${binary_name}"
}
case $CMD in
case ${CMD} in
push)
push
;;
push
;;
getbinpath)
getbinpath "$RUNNER_ARCH"
;;
getbinpath "${RUNNER_ARCH}"
;;
"")
usage "command not provided"
;;
fail "command not provided"
;;
*)
usage "invalid command: $CMD"
;;
fail "invalid command: ${CMD}"
;;
esac

View file

@ -3,87 +3,81 @@
#==============================================================================
# export_version.sh
#==============================================================================
#
#
# DESCRIPTION:
# Script to export Go library version information for package development.
# Extracts and exports version information from go.mod including Go version,
# toolchain version, and syspkg version.
#
# USAGE:
# ./scripts/export_version.sh
# export_version.sh [OPTIONS]
#
# OUTPUTS:
# - Sets environment variables: GO_VERSION, TOOLCHAIN_VERSION, SYSPKG_VERSION
# - Creates .version-info file with version details
#
# DEPENDENCIES:
# - bash
# - go (for reading go.mod)
# - grep, awk (for parsing)
# OPTIONS:
# -v, --verbose Enable verbose output
# -h, --help Show this help message
#==============================================================================
set -e
# Get the directory containing this script
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
source "$(git rev-parse --show-toplevel)/scripts/lib.sh"
parse_common_args "$@" >/dev/null # prevent return from echo'ng
# Function to extract Go version from go.mod
get_go_version() {
local go_version
go_version=$(grep "^go " "$PROJECT_ROOT/go.mod" | awk '{print $2}')
echo "$go_version"
local go_version
go_version=$(grep "^go " "${PROJECT_ROOT}/go.mod" | awk '{print $2}')
log_debug "Extracted Go version: ${go_version}"
echo "${go_version}"
}
# Function to extract toolchain version from go.mod
get_toolchain_version() {
local toolchain_version
toolchain_version=$(grep "^toolchain " "$PROJECT_ROOT/go.mod" | awk '{print $2}')
echo "$toolchain_version"
local toolchain_version
toolchain_version=$(grep "^toolchain " "${PROJECT_ROOT}/go.mod" | awk '{print $2}')
log_debug "Extracted toolchain version: ${toolchain_version}"
echo "${toolchain_version}"
}
# Function to extract syspkg version from go.mod
get_syspkg_version() {
local syspkg_version
syspkg_version=$(grep "github.com/awalsh128/syspkg" "$PROJECT_ROOT/go.mod" | awk '{print $2}')
echo "$syspkg_version"
local syspkg_version
syspkg_version=$(grep "github.com/awalsh128/syspkg" "${PROJECT_ROOT}/go.mod" | awk '{print $2}')
log_debug "Extracted syspkg version: ${syspkg_version}"
echo "${syspkg_version}"
}
# Main execution
echo "Exporting version information..."
GO_VERSION=$(get_go_version)
TOOLCHAIN_VERSION=$(get_toolchain_version)
SYSPKG_VERSION=$(get_syspkg_version)
# Export versions as environment variables
log_info "Exporting version information..."
GO_VERSION=$(get_go_version)
export GO_VERSION
TOOLCHAIN_VERSION=$(get_toolchain_version)
export TOOLCHAIN_VERSION
SYSPKG_VERSION=$(get_syspkg_version)
export SYSPKG_VERSION
# Create a version info file
VERSION_FILE="$PROJECT_ROOT/.version-info"
cat > "$VERSION_FILE" << EOF
VERSION_FILE="${PROJECT_ROOT}/.version-info"
log_debug "Creating version file: ${VERSION_FILE}"
cat >"${VERSION_FILE}" <<EOF
# Version information for cache-apt-pkgs-action
GO_VERSION=$GO_VERSION
TOOLCHAIN_VERSION=$TOOLCHAIN_VERSION
SYSPKG_VERSION=$SYSPKG_VERSION
GO_VERSION=${GO_VERSION}
TOOLCHAIN_VERSION=${TOOLCHAIN_VERSION}
SYSPKG_VERSION=${SYSPKG_VERSION}
EXPORT_DATE=$(date '+%Y-%m-%d %H:%M:%S')
EOF
echo "Version information has been exported to $VERSION_FILE"
echo "Go Version: $GO_VERSION"
echo "Toolchain Version: $TOOLCHAIN_VERSION"
echo "Syspkg Version: $SYSPKG_VERSION"
echo "Version information has been exported to ${VERSION_FILE}"
echo "Go Version: ${GO_VERSION}"
echo "Toolchain Version: ${TOOLCHAIN_VERSION}"
echo "Syspkg Version: ${SYSPKG_VERSION}"
# Also create a JSON format for tools that prefer it
VERSION_JSON="$PROJECT_ROOT/.version-info.json"
cat > "$VERSION_JSON" << EOF
VERSION_JSON="${PROJECT_ROOT}/.version-info.json"
cat >"${VERSION_JSON}" <<EOF
{
"goVersion": "$GO_VERSION",
"toolchainVersion": "$TOOLCHAIN_VERSION",
"syspkgVersion": "$SYSPKG_VERSION",
"goVersion": "${GO_VERSION}",
"toolchainVersion": "${TOOLCHAIN_VERSION}",
"syspkgVersion": "${SYSPKG_VERSION}",
"exportDate": "$(date '+%Y-%m-%d %H:%M:%S')"
}
EOF
echo "Version information also exported in JSON format to $VERSION_JSON"
echo "Version information also exported in JSON format to ${VERSION_JSON}"

27
scripts/fix_and_update.sh Executable file
View file

@ -0,0 +1,27 @@
#!/bin/bash
#==============================================================================
# fix_and_update.sh
#==============================================================================
#
# DESCRIPTION:
#   Runs lint fixes and checks for UTF-8 formatting issues in the project.
#   Intended to help maintain code quality and formatting consistency.
#
# USAGE:
#   fix_and_update.sh
#
# OPTIONS:
#   -v, --verbose  Enable verbose output
#   -h, --help     Show this help message
#==============================================================================

# Load shared helpers (logging, arg parsing, require_command) from the
# repository's script library.
source "$(git rev-parse --show-toplevel)/scripts/lib.sh"

parse_common_args "$@" >/dev/null # prevent return from echo'ng

print_status "Running trunk format and code check..."

require_command trunk "Install trunk to run lint fixes via curl https://get.trunk.io -fsSL | bash."

# NOTE(review): --ci puts trunk in CI (check-only) mode; despite the "fix"
# wording in this script's description, no --fix flag is passed here --
# confirm whether check-only behavior is intended.
trunk check --all --ci
trunk fmt --all --ci

log_success "All lint fixes applied and checks complete."

461
scripts/lib.sh Executable file
View file

@ -0,0 +1,461 @@
#!/bin/bash
#==============================================================================
# lib.sh
#==============================================================================
#
# DESCRIPTION:
# Enhanced common shell script library for project utilities and helpers.
# Provides functions for logging, error handling, argument parsing, file operations,
# command validation, and development workflow tasks.
#
# USAGE:
# source "$(cd "$(dirname "$0")" && pwd)/lib.sh"
#
# FEATURES:
# - Consistent logging and output formatting
# - Command existence and dependency checking
# - File and directory operations
# - Project structure helpers
# - Development tool installation helpers
# - Error handling and validation
#
#==============================================================================
# Exit on error by default for sourced scripts
set -eE -o functrace
# Detect debugging flag (bash -x) and also print line numbers
[[ $- == *"x"* ]] && PS4='+$(basename ${BASH_SOURCE[0]}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
# Global variables
export VERBOSE=${VERBOSE:-false}
export QUIET=${QUIET:-false}
export SCRIPT_DIRNAME="scripts"
#==============================================================================
# Logging Functions
#==============================================================================
export GREEN='\033[0;32m'
export RED='\033[0;31m'
export YELLOW='\033[0;33m'
export BLUE='\033[0;34m'
export CYAN='\033[0;36m'
export MAGENTA='\033[0;35m'
export NC='\033[0m' # No Color
export BOLD='\033[1m'
export DIM='\033[2m'
export BLINK='\033[5m'
# echo_color [echo-flags] <color-name> <text...>
#
# Print <text> in the named ANSI color, followed by a reset (${NC}).
# Leading dash-prefixed arguments are treated as echo flags, but only
# '-e' and '-n' are forwarded; any other dash-prefixed argument is consumed
# by the loop and silently dropped.
# <color-name> is case-insensitive: it is upper-cased and used as an indirect
# variable reference, so "green" resolves to ${GREEN}. An unknown color name
# expands to an empty string rather than failing.
echo_color() {
  local echo_flags=()
  # Collect echo flags (start with -)
  while [[ $1 == -* ]]; do
    if [[ $1 == "-e" || $1 == "-n" ]]; then
      echo_flags+=("$1")
    fi
    shift
  done
  local color="$1"
  local color_var
  # Upper-case the name so it matches the exported color variables above.
  color_var=$(echo "${color}" | tr '[:lower:]' '[:upper:]')
  shift
  # ${!color_var} is bash indirect expansion: the value of the variable
  # whose name is stored in color_var.
  echo -e "${echo_flags[@]}" "${!color_var}$*${NC}"
}
#==============================================================================
# Logging Functions
#==============================================================================
# Leveled logging helpers. info/success honor QUIET; warn/error always print
# (to stderr); debug prints to stderr only when VERBOSE=true.
#
# The originals ran `! ${QUIET}` / `if ${VERBOSE}` -- executing the
# variable's VALUE as a command. That works only for the literals true/false
# and otherwise produces "command not found" noise (or runs an arbitrary
# command); compare the strings instead.

# Informational message on stdout; suppressed when QUIET=true.
log_info() {
  [[ ${QUIET} == true ]] || echo -e "${BLUE}[INFO]${NC} $1"
}

# Warning on stderr; always printed.
log_warn() {
  echo -e "${YELLOW}[WARN]${NC} $1" >&2
}

# Error on stderr; always printed.
log_error() {
  echo -e "${RED}[ERROR]${NC} $1" >&2
}

# Success message on stdout; suppressed when QUIET=true.
log_success() {
  [[ ${QUIET} == true ]] || echo -e "${GREEN}[SUCCESS]${NC} $1"
}

# Debug message on stderr; printed only when VERBOSE=true.
log_debug() {
  if [[ ${VERBOSE} == true ]]; then
    echo -e "${DIM}[DEBUG]${NC} $1" >&2
  fi
}
# Print formatted headers
# Formatted output helpers; each is a no-op when QUIET=true.
#
# As with the logging functions, the originals executed `! ${QUIET}` -- the
# variable's value run as a command -- which misbehaves for any value other
# than the literals true/false; a string comparison is used instead.

# Bold blue top-level header, preceded by a blank line.
print_header() {
  [[ ${QUIET} == true ]] || echo -en "\n${BOLD}${BLUE}$1${NC}\n"
}

# Bold cyan section title, surrounded by blank lines.
print_section() {
  [[ ${QUIET} == true ]] || echo -en "\n${CYAN}${BOLD}$1${NC}\n\n"
}

# Menu option line in the form "<key>) <label>".
print_option() {
  [[ ${QUIET} == true ]] || echo -en "${YELLOW}$1)${CYAN} $2${NC}\n"
}

# "==> message" progress line.
print_status() {
  [[ ${QUIET} == true ]] || echo -en "${GREEN}==>${NC} $1\n"
}

# Bold green completion line.
print_success() {
  [[ ${QUIET} == true ]] || echo -en "${GREEN}${BOLD}$1${NC}\n"
}
#==============================================================================
# Error Handling
#==============================================================================
# fail [message] [exit_code]
#
# Log an optional error message via log_error and terminate the shell.
# The exit code defaults to 1 when not supplied.
fail() {
  local message="${1-}"
  local code="${2:-1}"
  [[ -z ${message} ]] || log_error "${message}"
  exit "${code}"
}
# Trap handler: remove TEMP_DIR (when one was created via create_temp_dir),
# then on a non-zero exit print a reverse call-stack trace before
# propagating the original exit code.
cleanup_on_exit() {
  local exit_code=$?
  # ${TEMP_DIR:-} guards against `set -u` in sourcing scripts that never
  # created a temp dir (the unguarded expansion aborted the trap there).
  [[ -n ${TEMP_DIR:-} && -d ${TEMP_DIR:-} ]] && rm -rf "${TEMP_DIR}"
  [[ ${exit_code} -eq 0 ]] && exit 0
  local i
  for ((i = ${#FUNCNAME[@]} - 1; i; i--)); do
    echo "${BASH_SOURCE[i]}:${BASH_LINENO[i]}: ${FUNCNAME[i]}"
  done
  exit "${exit_code}"
}

# Install cleanup_on_exit as the EXIT trap for the current shell.
setup_cleanup() {
  trap 'cleanup_on_exit' EXIT
}
#==============================================================================
# Command and Dependency Checking
#==============================================================================
# command_exists <name>
#
# Succeed when <name> resolves to a command (binary, builtin, function,
# or alias) in the current shell.
command_exists() {
  if command -v "$1" >/dev/null 2>&1; then
    return 0
  fi
  return 1
}
# require_command <cmd> [install_msg]
#
# Abort via fail() unless <cmd> is available; install_msg customizes the
# guidance appended to the error.
require_command() {
  local cmd="$1"
  local install_msg="${2:-Please install ${cmd}}"
  command_exists "${cmd}" || fail "${cmd} is required. ${install_msg}"
  log_debug "Found required command: ${cmd}"
}

# require_script <path>
#
# Abort via fail() unless <path> exists and is executable; used to validate
# internal helper scripts, so failure indicates a bug.
require_script() {
  local script="$1"
  [[ -x ${script} ]] || fail "${script} is required and must be executable. This script has a bug."
  log_debug "Found required script: ${script}"
}
# npm_package_installed <pkg>
#
# Succeed when <pkg> is installed globally via npm.
npm_package_installed() {
  npm list -g "$1" >/dev/null 2>&1
}

# go_tool_installed <module-path>
#
# Succeed when the Go module is listed for the current module, or when a
# binary named after the module path's basename is on PATH.
go_tool_installed() {
  if go list -m "$1" >/dev/null 2>&1; then
    return 0
  fi
  command_exists "$(basename "$1")"
}
#==============================================================================
# File and Directory Operations
#==============================================================================
# file_exists <path> -- true when <path> is a regular file.
file_exists() {
  [[ -f $1 ]]
}

# dir_exists <path> -- true when <path> is a directory.
dir_exists() {
  [[ -d $1 ]]
}

# ensure_dir <path> -- create <path> (and any parents) when missing.
ensure_dir() {
  if [[ ! -d $1 ]]; then
    mkdir -p "$1"
  fi
  log_debug "Ensured directory exists: $1"
}

# create_temp_dir -- make a temporary directory, record it in TEMP_DIR so
# the EXIT trap can remove it, and print its path on stdout.
create_temp_dir() {
  TEMP_DIR=$(mktemp -d)
  log_debug "Created temporary directory: ${TEMP_DIR}"
  echo "${TEMP_DIR}"
}

# safe_remove <path> -- recursively delete <path> when it exists.
safe_remove() {
  local target="$1"
  if [[ -e ${target} ]]; then
    rm -rf "${target}"
    log_debug "Removed: ${target}"
  fi
}
#==============================================================================
# Project Structure Helpers
#==============================================================================
# get_project_root
#
# Print the repository's git toplevel directory, falling back to the
# current working directory when git is unavailable or this is not a repo.
get_project_root() {
  local root=""
  if command_exists git; then
    root=$(git rev-parse --show-toplevel 2>/dev/null || true)
  fi
  if [[ -z ${root} ]]; then
    root=$(pwd)
  fi
  echo "${root}"
}

PROJECT_ROOT="$(get_project_root)"
export PROJECT_ROOT
#==============================================================================
# Development Tool Helpers
#==============================================================================
# install_trunk -- install the trunk CLI from its upstream bootstrap script
# unless it is already on PATH.
install_trunk() {
  if ! command_exists trunk; then
    log_info "Installing trunk..."
    curl -fsSL https://get.trunk.io | bash
    log_success "trunk installed successfully"
    return 0
  fi
  log_debug "trunk already installed"
}

# install_doctoc -- globally install the doctoc markdown-TOC generator via
# npm when missing. Requires npm.
install_doctoc() {
  require_command npm "Please install Node.js and npm first"
  if ! npm_package_installed doctoc; then
    log_info "Installing doctoc..."
    npm install -g doctoc
    log_success "doctoc installed successfully"
    return 0
  fi
  log_debug "doctoc already installed"
}

# install_go_tools -- install the Go formatting/linting helpers used by the
# project (goimports, golines, golangci-lint).
install_go_tools() {
  local go_tools=(
    "golang.org/x/tools/cmd/goimports@latest"
    "github.com/segmentio/golines@latest"
    "github.com/golangci/golangci-lint/cmd/golangci-lint@latest"
  )
  log_info "Installing Go development tools..."
  local tool
  for tool in "${go_tools[@]}"; do
    log_info "Installing $(basename "${tool}")..."
    go install "${tool}"
  done
  log_success "Go tools installed successfully"
}
#==============================================================================
# Validation Helpers
#==============================================================================
# validate_go_project -- abort unless Go is installed and a go.mod exists at
# the project root.
validate_go_project() {
  require_command go "Please install Go first"
  local root
  root=$(get_project_root)
  [[ -f "${root}/go.mod" ]] || fail "Not a Go project (no go.mod found)"
  log_debug "Validated Go project structure"
}

# validate_git_repo -- abort unless git is installed and the project root is
# a git repository.
validate_git_repo() {
  require_command git "Please install git first"
  local root
  root=$(get_project_root)
  [[ -d "${root}/.git" ]] || fail "Not a git repository"
  log_debug "Validated git repository"
}
#==============================================================================
# Common Argument Parsing
#==============================================================================
# parse_common_args [args...]
#
# Consume the shared CLI flags (-h/--help, -v/--verbose, -q/--quiet),
# stopping at the first unrecognized argument. -h exits the shell after
# running the caller's show_help (when defined). Remaining arguments are
# echoed on stdout so callers can capture them; callers that don't want
# that output must redirect it (e.g. `parse_common_args "$@" >/dev/null`).
# Always returns 0.
parse_common_args() {
  while [[ $# -gt 0 ]]; do
    case $1 in
      -h | --help)
        # Only call show_help if the sourcing script defined it as a function.
        [[ $(type -t show_help) == function ]] && show_help
        exit 0
        ;;
      -v | --verbose)
        # Enable verbose mode once; log_debug becomes active immediately.
        if [[ ${VERBOSE} == false ]]; then
          export VERBOSE=true
          log_debug "Verbose mode enabled"
        fi
        shift
        ;;
      -q | --quiet)
        export QUIET=true
        shift
        ;;
      *)
        # Return unhandled arguments
        break
        ;;
    esac
  done
  # Return remaining arguments
  # Echo any remaining unhandled arguments for callers to capture
  if [[ $# -gt 0 ]]; then
    echo "$@"
  fi
  return 0
}
#==============================================================================
# Common Operations
#==============================================================================
# run_with_status <description> <command...>
#
# Announce <description>, eval the command string, and log success or
# failure. Returns 0 on success, otherwise the command's exit code.
run_with_status() {
  local description="$1"
  shift
  local cmd="$*"
  print_status "${description}"
  log_debug "Running: ${cmd}"
  local rc=0
  eval "${cmd}" || rc=$?
  if ((rc == 0)); then
    log_success "${description} completed"
    return 0
  fi
  log_error "${description} failed (exit code: ${rc})"
  return "${rc}"
}
# update_go_modules -- tidy and verify go.mod/go.sum with status reporting.
update_go_modules() {
  run_with_status "Updating Go modules" "go mod tidy && go mod verify"
}

# run_tests -- run the full verbose Go test suite.
run_tests() {
  run_with_status "Running tests" "go test -v ./..."
}

# run_build -- compile all packages verbosely.
run_build() {
  run_with_status "Building project" "go build -v ./..."
}

# run_lint -- run trunk linting; aborts when trunk is not installed.
run_lint() {
  require_command trunk "Please install trunk first"
  run_with_status "Running linting" "trunk check"
}
#==============================================================================
# Default Help Function
#==============================================================================
# show_help
#
# Default help text: print the "#====" delimited header comment block of
# the script that sourced this library (BASH_SOURCE[1]), stripping the
# leading '#' from each line. Scripts can override this by defining their
# own show_help function before calling parse_common_args.
show_help() {
  # Extract header comment block and format it
  local script_file="${BASH_SOURCE[1]}"
  if [[ ! -f ${script_file} ]]; then
    echo "Help information not available"
    return
  fi
  local lines=$'\n'
  local inside_header=false
  while IFS= read -r line; do
    if [[ ${inside_header} == true ]]; then
      # Skip the '#====' divider lines themselves.
      [[ ${line} =~ ^#\=+ ]] && continue
      if [[ ${line} =~ ^# ]]; then
        # Keep the comment text with the leading '#' removed.
        lines+="${line#\#}"$'\n'
      else
        # First non-comment line marks the end of the header block.
        break
      fi
    fi
    # The first '#====' divider marks the start of the header block.
    [[ ${line} =~ ^#\=+ ]] && inside_header=true
  done <"${script_file}"
  printf "%s" "${lines}"
}
#==============================================================================
# Utility Functions
#==============================================================================
# pause -- block until a single keypress; a no-op when QUIET=true.
pause() {
  if [[ ${QUIET} == true ]]; then
    return
  fi
  echo
  read -n 1 -s -r -p "Press any key to continue..."
  echo
}

# confirm [prompt]
#
# Interactively ask a yes/no question; returns 0 for yes, 1 for no, and
# re-prompts on any other answer.
confirm() {
  local question="${1:-Are you sure?}"
  local reply
  while true; do
    read -rp "${question} (y/n): " reply
    # Lower-case the reply; matches the original's case-insensitive patterns.
    case ${reply,,} in
      y | yes) return 0 ;;
      n | no) return 1 ;;
      *) echo "Please answer yes or no." ;;
    esac
  done
}
#==============================================================================
# Initialization
#==============================================================================
# Set up cleanup trap when library is sourced
setup_cleanup
# init [args...]
#
# Optional entry point for sourcing scripts: parse the common flags, and
# refuse to proceed when this library is executed directly rather than
# sourced.
init() {
  parse_common_args "$@"
  [[ ${BASH_SOURCE[0]} == "${0}" ]] || return 0
  echo "This script should be sourced, not executed directly."
  # shellcheck disable=SC2016
  echo 'Usage: source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"/lib.sh'
  exit 1
}
# Do not auto-run init when this file is sourced; allow callers to invoke init() explicitly if needed.

View file

@ -3,153 +3,159 @@
#==============================================================================
# menu.sh
#==============================================================================
#
#
# DESCRIPTION:
# Interactive menu for running project scripts and common tasks.
# Provides easy access to development, testing, and maintenance tasks.
# Streamlined interactive menu for essential development tasks.
# Provides quick access to the most commonly used development operations.
#
# USAGE:
# ./scripts/menu.sh
# menu.sh
#
# FEATURES:
# - Interactive menu interface
# - Clear task descriptions
# - Status feedback
# - Error handling
#
# DEPENDENCIES:
# - bash
# - Various project scripts
# OPTIONS:
# -v, --verbose Enable verbose output
# -h, --help Show this help message
#==============================================================================
# Colors for output
GREEN='\033[0;32m'
BLUE='\033[0;34m'
RED='\033[0;31m'
NC='\033[0m' # No Color
BOLD='\033[1m'
source "$(git rev-parse --show-toplevel)/scripts/lib.sh"
SCRIPT_DIR="${PROJECT_ROOT}/scripts"
CAP_CMD_DIR="${PROJECT_ROOT}/cmd/cache_apt_pkgs"
# Function to print section headers
print_header() {
echo -e "\n${BOLD}${BLUE}$1${NC}\n"
parse_common_args "$@" >/dev/null # prevent return from echo'ng
#==============================================================================
# Menu Operations
#==============================================================================
# run_task <description> <command...>
#
# Announce a menu task, eval the command string, report the outcome, and
# wait for a keypress before returning to the menu.
run_task() {
  local description="$1"
  shift
  local cmd="$*"
  print_status "Running: ${description}"
  [[ ${VERBOSE} == true ]] && log_debug "Command: ${cmd}"
  echo
  local rc=0
  eval "${cmd}" || rc=$?
  if ((rc == 0)); then
    log_success "${description} completed successfully"
  else
    log_error "${description} failed (exit code: ${rc})"
  fi
  pause
}
# Function to print status messages
print_status() {
echo -e "${GREEN}==>${NC} $1"
}
show_project_status() {
print_header "Project Status"
# Function to print errors
print_error() {
echo -e "${RED}Error:${NC} $1"
}
echo "Git Status:"
git status --short --branch
echo
# Function to wait for user input before continuing
pause() {
echo
read -n 1 -s -r -p "Press any key to continue..."
echo "Go Module Status:"
go mod verify && log_success "Go modules are valid"
echo
if command_exists trunk; then
echo "Linting Status:"
trunk check --no-fix --quiet || log_warn "Linting issues detected"
echo
fi
pause
}
# Function to run a command and handle errors
run_command() {
local cmd="$1"
local description="$2"
print_status "Running: $description"
echo "Command: $cmd"
echo
if eval "$cmd"; then
print_status "Successfully completed: $description"
else
print_error "Failed: $description"
echo "Exit code: $?"
fi
pause
}
#==============================================================================
# Main Menu Loop
#==============================================================================
# Main menu
while true; do
main_menu() {
while true; do
clear
print_header "Cache Apt Packages Action - Development Menu"
echo "1) Setup Development Environment"
echo "2) Update Markdown TOCs"
echo "3) Run Tests"
echo "4) Run Linting (trunk check)"
echo "5) Build Project"
echo "6) Check UTF-8 Encoding"
echo "7) Run All Checks (tests, lint, build)"
echo "8) Run All Script Tests"
print_header "Cache Apt Packages - Development Menu"
print_section "Essential Tasks:"
print_option 1 "Setup Development Environment"
print_option 2 "Run All Checks (test + lint + build)"
print_option 3 "Test Only"
print_option 4 "Lint & Fix"
print_option 5 "Build Project"
print_section "Maintenance:"
print_option 6 "Update Documentation (TOCs)"
print_option 7 "Export Version Info"
print_section "Information:"
print_option 8 "Project Status"
print_option 9 "Recent Changes"
echo
echo "9) Show Project Status"
echo "10) Show Recent Git Log"
echo "11) Export Version Information"
echo
echo "q) Quit"
echo
read -p "Select an option: " choice
print_option q "Quit"
echo
case $choice in
1)
run_command "./scripts/setup_dev.sh" "Setting up development environment"
;;
2)
run_command "./scripts/update_md_tocs.sh" "Updating markdown tables of contents"
;;
3)
run_command "go test -v ./..." "Running tests"
;;
4)
run_command "trunk check" "Running linting checks"
;;
5)
run_command "go build -v ./..." "Building project"
;;
6)
run_command "./scripts/check_utf8.sh" "Checking UTF-8 encoding"
;;
7)
print_header "Running All Checks"
run_command "go test -v ./..." "Running tests"
run_command "trunk check" "Running linting checks"
run_command "go build -v ./..." "Building project"
run_command "./scripts/check_utf8.sh" "Checking UTF-8 encoding"
;;
8)
print_header "Running All Script Tests"
run_command "./scripts/tests/setup_dev_test.sh" "Running setup dev tests"
run_command "./scripts/tests/check_utf8_test.sh" "Running UTF-8 check tests"
run_command "./scripts/tests/update_md_tocs_test.sh" "Running markdown TOC tests"
run_command "./scripts/tests/export_version_test.sh" "Running version export tests"
run_command "./scripts/tests/distribute_test.sh" "Running distribute tests"
;;
9)
print_header "Project Status"
echo "Git Status:"
git status
echo
echo "Go Module Status:"
go mod verify
pause
;;
10)
print_header "Recent Git Log"
git log --oneline -n 10
pause
;;
11)
run_command "./scripts/export_version.sh" "Exporting version information"
;;
q|Q)
print_status "Goodbye!"
exit 0
;;
*)
print_error "Invalid option"
pause
;;
echo_color -n green "choice > "
read -n 1 -rp "" choice
printf "\n\n"
case ${choice} in
1)
run_task "Setting up development environment" \
"${SCRIPT_DIR}/setup_dev.sh"
;;
2)
print_header "Running All Checks"
echo ""
run_task "Running linting" "trunk check --fix"
run_task "Building project" "go build -v ${CAP_CMD_DIR}"
run_task "Running tests" "go test -v ${CAP_CMD_DIR}"
;;
3)
run_task "Running tests" "go test -v ${CAP_CMD_DIR}"
;;
4)
run_task "Running lint with fixes" "trunk check --fix"
;;
5)
run_task "Building project" "go build -v ${CAP_CMD_DIR}"
;;
6)
run_task "Updating documentation TOCs" \
"${SCRIPT_DIR}/update_md_tocs.sh"
;;
7)
run_task "Exporting version information" \
"${SCRIPT_DIR}/export_version.sh"
;;
8)
show_project_status
;;
9)
print_header "Recent Changes"
git log --oneline --graph --decorate -n 10
pause
;;
q | Q | "")
echo -e "${GREEN}Goodbye!${NC}"
exit 0
;;
*)
echo ""
log_error "Invalid option: ${choice}"
pause
;;
esac
done
done
}
#==============================================================================
# Entry Point
#==============================================================================
# Validate project structure
# validate_go_project
# validate_git_repo
# Parse any command line arguments
parse_common_args "$@"
# Run main menu
main_menu

View file

@ -0,0 +1 @@
#!/bin/bash

View file

@ -3,122 +3,162 @@
#==============================================================================
# setup_dev.sh
#==============================================================================
#
#
# DESCRIPTION:
# Sets up the development environment for the cache-apt-pkgs-action project.
# Installs all necessary tools, configures Go environment, and sets up
# pre-commit hooks.
#
# USAGE:
# ./scripts/setup_dev.sh
# setup_dev.sh [options]
#
# DEPENDENCIES:
# - go
# - npm
# - git
# OPTIONS:
# -v, --verbose Enable verbose output
# -h, --help Show this help message
#==============================================================================
set -e # Exit on error
source "$(git rev-parse --show-toplevel)/scripts/lib.sh"
# Colors for output
GREEN='\033[0;32m'
RED='\033[0;31m'
NC='\033[0m' # No Color
parse_common_args "$@"
# Function to check if a command exists
command_exists() {
command -v "$1" >/dev/null 2>&1
#==============================================================================
# Setup Functions
#==============================================================================
check_prerequisites() {
print_status "Checking prerequisites"
require_command go "Please install Go first (https://golang.org/dl/)"
require_command npm "Please install Node.js and npm first (https://nodejs.org/)"
require_command git "Please install git first"
require_command curl "Please install curl first"
log_success "All prerequisites are available"
}
# Function to check if an npm package is installed globally
npm_package_installed() {
npm list -g "$1" >/dev/null 2>&1
setup_go_environment() {
validate_go_project
print_status "Configuring Go environment"
go env -w GO111MODULE=auto
update_go_modules
}
# Function to print status messages
print_status() {
echo -e "${GREEN}==>${NC} $1"
install_development_tools() {
print_status "Installing development tools"
install_trunk
install_doctoc
install_go_tools
log_success "All development tools installed"
}
# Function to print error messages
print_error() {
echo -e "${RED}Error:${NC} $1"
exit 1
}
setup_git_hooks() {
validate_git_repo
# Check prerequisites
print_status "Checking prerequisites..."
print_status "Setting up Git hooks"
if ! command_exists go; then
print_error "Go is not installed. Please install Go first."
fi
if ! command_exists npm; then
print_error "npm is not installed. Please install Node.js and npm first."
fi
if ! command_exists git; then
print_error "git is not installed. Please install git first."
fi
# Configure Go environment
print_status "Configuring Go environment..."
go env -w GO111MODULE=auto
# Verify Go modules
print_status "Verifying Go modules..."
go mod tidy
go mod verify
# Install development tools
print_status "Installing development tools..."
# Trunk for linting
if ! command_exists trunk; then
print_status "Installing trunk..."
curl -fsSL https://get.trunk.io -o get-trunk.sh
bash get-trunk.sh
rm get-trunk.sh
fi
# doctoc for markdown TOC
if ! npm_package_installed doctoc; then
print_status "Installing doctoc..."
npm install -g doctoc
fi
# Go tools
print_status "Installing Go tools..."
go install golang.org/x/tools/cmd/goimports@latest
go install github.com/segmentio/golines@latest
go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
# Set up Git hooks
print_status "Setting up Git hooks..."
if [ -d .git ]; then
# Initialize trunk
# Initialize trunk if not already done
if [[ ! -f .trunk/trunk.yaml ]]; then
log_info "Initializing trunk configuration"
trunk init
fi
# Enable pre-commit hooks
git config core.hooksPath .git/hooks/
else
print_error "Not a git repository"
fi
# Configure git hooks
git config core.hooksPath .git/hooks/
# Update markdown TOCs
print_status "Updating markdown TOCs..."
./scripts/update_md_tocs.sh
log_success "Git hooks configured"
}
# Initial trunk check
print_status "Running initial trunk check..."
trunk check
update_project_documentation() {
print_status "Updating project documentation"
# Final verification
print_status "Verifying installation..."
go test ./...
local update_script="${SCRIPT_DIR}/update_md_tocs.sh"
if [[ -x ${update_script} ]]; then
"${update_script}"
else
log_warn "Markdown TOC update script not found or not executable"
fi
}
print_status "Development environment setup complete!"
echo "You can now:"
echo " 1. Run tests: go test ./..."
echo " 2. Run linting: trunk check"
echo " 3. Update markdown TOCs: ./scripts/update_md_tocs.sh"
run_initial_checks() {
print_status "Running initial project validation"
# Run trunk check
if command_exists trunk; then
run_with_status "Running initial linting" "trunk check --no-fix"
fi
# Run tests
run_tests
log_success "Initial validation completed"
}
display_completion_message() {
print_header "Development Environment Setup Complete!"
echo "Available commands:"
echo " • Run tests: go test ./..."
echo " • Run linting: trunk check"
echo " • Update documentation: ./scripts/update_md_tocs.sh"
echo " • Interactive menu: ./scripts/menu.sh"
echo
log_success "Ready for development!"
}
#==============================================================================
# Main Setup Process
#==============================================================================
main() {
  # Entry point: parse CLI flags first so --help/--verbose take effect
  # before any setup work begins.
  while [[ $# -gt 0 ]]; do
    case $1 in
    -v | --verbose)
      # Exported so helper scripts invoked below inherit verbosity.
      export VERBOSE=true
      ;;
    -h | --help)
      # Quoted 'EOF' delimiter: help text is emitted verbatim, no expansion.
      cat <<'EOF'
USAGE:
setup_dev.sh [OPTIONS]
DESCRIPTION:
Sets up the development environment for the cache-apt-pkgs-action project.
Installs all necessary tools, configures Go environment, and sets up
pre-commit hooks.
OPTIONS:
-v, --verbose Enable verbose output
-h, --help Show this help message
EOF
      exit 0
      ;;
    *)
      # Unknown flag: fail fast with guidance on stderr.
      echo "Unknown option: $1" >&2
      echo "Use --help for usage information." >&2
      exit 1
      ;;
    esac
    shift
  done

  print_header "Setting up Development Environment"

  # Run setup steps in dependency order; each helper is defined above.
  check_prerequisites
  setup_go_environment
  install_development_tools
  setup_git_hooks
  update_project_documentation
  run_initial_checks
  display_completion_message
}
#==============================================================================
# Entry Point
#==============================================================================
main "$@"

21
scripts/template.sh Executable file
View file

@ -0,0 +1,21 @@
#!/bin/bash
#==============================================================================
# <script>.sh
#==============================================================================
#
# DESCRIPTION:
#   <your description>
#
# USAGE:
#   <script>.sh [OPTIONS]
#
# OPTIONS:
#   -v, --verbose    Enable verbose output
#   -h, --help       Show this help message
#   -yv, --your_var  Description of your_var
#==============================================================================
# Shared helpers (logging, printing, arg parsing). Resolved from the repo
# root so the script works regardless of the caller's working directory.
source "$(git rev-parse --show-toplevel)/scripts/lib.sh"
# your code here

View file

@ -1,190 +0,0 @@
#!/bin/bash
#==============================================================================
# check_utf8_test.sh
#==============================================================================
#
# DESCRIPTION:
# Test suite for check_utf8.sh script.
# Validates UTF-8 encoding detection, file handling, and error conditions.
#
# USAGE:
# ./scripts/tests/check_utf8_test.sh [-v|--verbose] [-r|--recursive]
#
# OPTIONS:
# -v, --verbose Show verbose test output
# -r, --recursive Test recursive directory scanning
# -h, --help Show this help message
#
#==============================================================================
# Source the test library
source "$(dirname "$0")/test_lib.sh"
# Additional settings
TEST_RECURSIVE=false
# Dependencies check
check_dependencies "file" "iconv" || exit 1
# Parse arguments (handle any unprocessed args from common parser)
while [[ -n "$1" ]]; do
arg="$(parse_common_args "$1")"
case "$arg" in
-r|--recursive)
TEST_RECURSIVE=true
shift
;;
*)
echo "Unknown option: $1"
generate_help "$0"
exit 1
;;
esac
shift
done
# Initialize test environment
setup_test_env
# Get the directory containing this script
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
PROJECT_ROOT="$(dirname "$(dirname "$SCRIPT_DIR")")"
# Create a temporary directory for test files
TEMP_DIR=$(mktemp -d)
trap 'rm -rf "$TEMP_DIR"' EXIT
# Create test files with different encodings
create_encoded_file() {
  # Write $2 into file $1 using encoding $3 ("utf8" or any iconv target name).
  local target="$1"
  local text="$2"
  local encoding="$3"

  if [[ "$encoding" != "utf8" ]]; then
    # Transcode from UTF-8 to the requested encoding; no trailing newline.
    echo -n "$text" | iconv -f UTF-8 -t "$encoding" > "$target"
    print_info "Created $encoding encoded file: $target"
    return
  fi

  # Plain UTF-8: delegate to the standard test-file helper.
  create_test_file "$target" "$text"
}
print_header "Check UTF-8 Tests"

# Section 1: Command Line Interface
print_section "Testing Command Line Interface"

# BUGFIX: the arguments below were on separate lines with no trailing
# backslashes, so each line executed as an independent command instead of
# being passed to test_case.
test_case "help option" \
  "$PROJECT_ROOT/scripts/check_utf8.sh --help" \
  "Usage:" \
  true

test_case "unknown option" \
  "$PROJECT_ROOT/scripts/check_utf8.sh --unknown" \
  "Unknown option" \
  false
# Section 2: Basic File Encoding Detection
print_section "Testing Basic File Encoding Detection"
create_encoded_file "$TEMP_DIR/utf8.txt" "Hello, 世界!" "utf8"
create_encoded_file "$TEMP_DIR/latin1.txt" "Hello, World!" "ISO-8859-1"
test_case "single utf8 file" \
"$PROJECT_ROOT/scripts/check_utf8.sh $TEMP_DIR/utf8.txt" \
"" \
true \
"UTF-8 file should pass validation"
test_case "single latin1 file" \
"$PROJECT_ROOT/scripts/check_utf8.sh $TEMP_DIR/latin1.txt" \
"non-UTF-8" \
false \
"Latin-1 file should fail validation"
# Section 3: Multiple File Handling
print_section "Testing Multiple File Handling"
create_encoded_file "$TEMP_DIR/mixed1.txt" "Hello" "utf8"
create_encoded_file "$TEMP_DIR/mixed2.txt" "World" "ISO-8859-1"
test_case "multiple mixed files" \
"$PROJECT_ROOT/scripts/check_utf8.sh $TEMP_DIR/mixed1.txt $TEMP_DIR/mixed2.txt" \
"non-UTF-8" \
false \
"Multiple files with mixed encodings should fail"
# Section 4: Special Cases
print_section "Testing Special Cases"
create_test_file "$TEMP_DIR/empty.txt" ""
test_case "empty file" \
"$PROJECT_ROOT/scripts/check_utf8.sh '$TEMP_DIR/empty.txt'" \
"" \
true \
"Empty file should be considered valid UTF-8"
test_case "missing file" \
"$PROJECT_ROOT/scripts/check_utf8.sh '$TEMP_DIR/nonexistent.txt'" \
"No such file" \
false \
"Missing file should fail with appropriate error"
test_case "invalid directory" \
"$PROJECT_ROOT/scripts/check_utf8.sh '$TEMP_DIR/nonexistent'" \
"No such file" \
false \
"Invalid directory should fail with appropriate error"
# Print test summary
print_summary
# Optional recursive testing section
if [[ "$TEST_RECURSIVE" == "true" ]]; then
  print_section "Testing Recursive Directory Handling"

  create_test_dir "$TEMP_DIR/subdir/deep"
  create_encoded_file "$TEMP_DIR/subdir/deep/utf8_deep.txt" "Deep UTF-8" "utf8"
  create_encoded_file "$TEMP_DIR/subdir/deep/latin1_deep.txt" "Deep Latin-1" "ISO-8859-1"

  test_case "recursive directory check" \
    "$PROJECT_ROOT/scripts/check_utf8.sh -r '$TEMP_DIR'" \
    "non-UTF-8" \
    false \
    "Recursive check should find non-UTF-8 files in subdirectories"
fi
# BUGFIX: orphaned continuation arguments ('"" \' and 'true') that trailed
# the closing 'fi' were leftover residue from an earlier edit; removed.
# Create file with BOM
printf '\xEF\xBB\xBF' > "$TEMP_DIR/with_bom.txt"
echo "Hello, World!" >> "$TEMP_DIR/with_bom.txt"
test_case "UTF-8 with BOM" \
"$PROJECT_ROOT/scripts/check_utf8.sh '$TEMP_DIR/with_bom.txt'" \
"" \
true
# Section 5: Error Conditions
echo -e "\n${BLUE}Testing Error Conditions${NC}"
test_case "nonexistent file" \
"$PROJECT_ROOT/scripts/check_utf8.sh nonexistent.txt" \
"No such file" \
false
test_case "directory as file" \
"$PROJECT_ROOT/scripts/check_utf8.sh '$TEMP_DIR'" \
"Is a directory" \
false
# Create unreadable file
touch "$TEMP_DIR/unreadable.txt"
chmod 000 "$TEMP_DIR/unreadable.txt"
test_case "unreadable file" \
"$PROJECT_ROOT/scripts/check_utf8.sh '$TEMP_DIR/unreadable.txt'" \
"Permission denied" \
false
chmod 644 "$TEMP_DIR/unreadable.txt"
# Report results
echo
echo "Test Results:"
echo "Passed: $PASS"
echo "Failed: $FAIL"
exit $FAIL

View file

@ -1,127 +1,106 @@
#!/bin/bash
# Colors for test output
GREEN='\033[0;32m'
RED='\033[0;31m'
NC='\033[0m' # No Color
#==============================================================================
# distribute_test.sh
#==============================================================================
#
# DESCRIPTION:
# Test suite for distribute.sh. Validates command handling, binary creation,
# architecture-specific output, and error conditions for the distribution
# script.
#
# USAGE:
# distribute_test.sh [OPTIONS]
#
# OPTIONS:
# -v, --verbose Enable verbose test output
# --stop-on-failure Stop on first test failure
# -h, --help Show this help message
#
#==============================================================================
DIST_DIR="../dist"
# Source the test framework, exports SCRIPT_PATH
source "$(git rev-parse --show-toplevel)/scripts/tests/test_lib.sh"
# Test counter
PASS=0
FAIL=0
DIST_DIR="$(get_project_root)/dist"
function test_case() {
local name=$1
local cmd=$2
local expected_output=$3
local should_succeed=${4:-true}
# Define test functions
run_tests() {
# Disable exit-on-error during test execution to prevent early exit
set +e
echo -n "Testing $name... "
test_section "command validation"
# Run the command and capture both stdout and stderr
local output
if [[ $should_succeed == "true" ]]; then
output=$($cmd 2>&1)
local status=$?
if [[ $status -eq 0 && $output == *"$expected_output"* ]]; then
echo -e "${GREEN}PASS${NC}"
((PASS++))
return 0
fi
else
output=$($cmd 2>&1) || true
if [[ $output == *"$expected_output"* ]]; then
echo -e "${GREEN}PASS${NC}"
((PASS++))
return 0
fi
fi
test_case "no command" \
"" \
"command not provided" \
false
echo -e "${RED}FAIL${NC}"
echo " Expected output to contain: '$expected_output'"
echo " Got: '$output'"
((FAIL++))
return 0 # Don't fail the whole test suite on one failure
test_case "invalid command" \
"invalid_cmd" \
"invalid command" \
false
test_section "getbinpath"
test_case "getbinpath no arch" \
"getbinpath" \
"runner architecture not provided" \
false
test_case "getbinpath invalid arch" \
"getbinpath INVALID" \
"invalid runner architecture" \
false
test_section "push and binary creation"
test_case "push command" \
"push" \
"All builds completed!" \
true # Ensure test doesn't cause script exit
# Test binary existence using direct shell commands instead of test_case
# because the distribute script doesn't have a 'test' command
for arch in "X86:386" "X64:amd64" "ARM:arm" "ARM64:arm64"; do
go_arch=${arch#*:}
test_file_exists "file exists for ${go_arch}" "${DIST_DIR}/cache-apt-pkgs-linux-${go_arch}"
done
# Test getbinpath for each architecture
for arch in "X86:386" "X64:amd64" "ARM:arm" "ARM64:arm64"; do
runner_arch=${arch%:*}
go_arch=${arch#*:}
test_case "getbinpath for ${runner_arch}" \
"getbinpath ${runner_arch}" \
"${DIST_DIR}/cache-apt-pkgs-linux-${go_arch}" \
true
done
test_section "cleanup and rebuild"
# Direct cleanup
rm -rf "${DIST_DIR}" 2>/dev/null
test_case "getbinpath after cleanup" \
"getbinpath X64" \
"binary not found" \
false
test_case "rebuild after cleanup" \
"push" \
"All builds completed!" \
true
test_case "getbinpath after rebuild" \
"getbinpath X64" \
"${DIST_DIR}/cache-apt-pkgs-linux-amd64" \
true
# Re-enable exit-on-error
set -e
}
echo "Running distribute.sh tests..."
echo "----------------------------"
# Test command validation
test_case "no command" \
"./distribute.sh" \
"error: command not provided" \
false
test_case "invalid command" \
"./distribute.sh invalid_cmd" \
"error: invalid command" \
false
# Test getbinpath
test_case "getbinpath no arch" \
"./distribute.sh getbinpath" \
"error: runner architecture not provided" \
false
test_case "getbinpath invalid arch" \
"./distribute.sh getbinpath INVALID" \
"error: invalid runner architecture: INVALID" \
false
# Test push and binary creation
test_case "push command" \
"./distribute.sh push" \
"All builds completed!" \
true
# Test binary existence
for arch in "X86:386" "X64:amd64" "ARM:arm" "ARM64:arm64"; do
runner_arch=${arch%:*}
go_arch=${arch#*:}
test_case "binary exists for $runner_arch" \
"test -f ${DIST_DIR}/cache-apt-pkgs-linux-$go_arch" \
"" \
true
done
# Test getbinpath for each architecture
for arch in "X86:386" "X64:amd64" "ARM:arm" "ARM64:arm64"; do
runner_arch=${arch%:*}
go_arch=${arch#*:}
test_case "getbinpath for $runner_arch" \
"./distribute.sh getbinpath $runner_arch" \
"${DIST_DIR}/cache-apt-pkgs-linux-$go_arch" \
true
done
# Test cleanup and rebuild
test_case "cleanup" \
"rm -rf ${DIST_DIR}" \
"" \
true
test_case "getbinpath after cleanup" \
"./distribute.sh getbinpath X64" \
"error: binary not found" \
false
test_case "rebuild after cleanup" \
"./distribute.sh push" \
"All builds completed!" \
true
test_case "getbinpath after rebuild" \
"./distribute.sh getbinpath X64" \
"${DIST_DIR}/cache-apt-pkgs-linux-amd64" \
true
# Print test summary
echo -e "\nTest Summary"
echo "------------"
echo -e "Tests passed: ${GREEN}$PASS${NC}"
echo -e "Tests failed: ${RED}$FAIL${NC}"
# Exit with failure if any tests failed
[[ $FAIL -eq 0 ]] || exit 1
# Start the test framework and run tests
start_tests "$@"
run_tests

View file

@ -3,167 +3,62 @@
#==============================================================================
# export_version_test.sh
#==============================================================================
#
#
# DESCRIPTION:
# Test suite for export_version.sh script.
# Validates version extraction, file generation, and error handling.
#
# USAGE:
# ./scripts/tests/export_version_test.sh [-v|--verbose]
# export_version_test.sh [OPTIONS]
#
# OPTIONS:
# -v, --verbose Show verbose test output
# -h, --help Show this help message
# -v, --verbose Enable verbose test output
# --stop-on-failure Stop on first test failure
# -h, --help Show this help message
#
#==============================================================================
# Colors for test output
GREEN='\033[0;32m'
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Source the test framework, exports SCRIPT_PATH
source "$(git rev-parse --show-toplevel)/scripts/tests/test_lib.sh"
# Test settings
VERBOSE=false
PASS=0
FAIL=0
# Define test functions
run_tests() {
test_section "Command Line Interface"
# Help message
show_help() {
  # Emit this script's own header documentation: every line between the
  # "# DESCRIPTION:" marker and the closing "#===" rule, with the leading
  # "# " comment prefix stripped.
  sed -n '/^# DESCRIPTION:/,/^#===/p' "$0" \
    | sed -e 's/^# \?//'
}
# Parse command line arguments
while [[ $# -gt 0 ]]; do
case $1 in
-v|--verbose)
VERBOSE=true
shift
;;
-h|--help)
show_help
exit 0
;;
*)
echo "Unknown option: $1"
show_help
exit 1
;;
esac
done
# Get the directory containing this script
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
PROJECT_ROOT="$(dirname "$(dirname "$SCRIPT_DIR")")"
# Create a temporary directory for test files
TEMP_DIR=$(mktemp -d)
trap 'rm -rf "$TEMP_DIR"' EXIT
# Source the script (without executing main)
source "$PROJECT_ROOT/scripts/export_version.sh"
# Main test case function
function test_case() {
  # Run `cmd`, capture combined stdout/stderr, and report PASS when the
  # output contains `expected_output` (and, when should_succeed=true, the
  # command also exited 0). Always returns 0 so one failing case cannot
  # abort the suite; failures are tallied in the FAIL counter instead.
  local name=$1
  local cmd=$2
  local expected_output=$3
  local should_succeed=${4:-true}

  echo -n "Testing $name... "

  # Run command and capture output.
  # NOTE(review): $cmd is expanded unquoted on purpose so the string is
  # word-split into command + arguments; arguments containing spaces are
  # therefore not supported by this helper.
  local output
  if [[ $should_succeed == "true" ]]; then
    output=$($cmd 2>&1)
    local status=$?
    if [[ $status -eq 0 && $output == *"$expected_output"* ]]; then
      echo -e "${GREEN}PASS${NC}"
      # NOTE(review): ((PASS++)) yields exit status 1 when PASS was 0;
      # harmless here, but would trip `set -e` if it were enabled.
      ((PASS++))
      return 0
    fi
  else
    # Expected-failure path: only the output is checked, not the status.
    output=$($cmd 2>&1) || true
    if [[ $output == *"$expected_output"* ]]; then
      echo -e "${GREEN}PASS${NC}"
      ((PASS++))
      return 0
    fi
  fi

  echo -e "${RED}FAIL${NC}"
  echo " Expected output to contain: '$expected_output'"
  echo " Got: '$output'"
  ((FAIL++))
  return 0
}
echo "Running export_version.sh tests..."
echo "--------------------------------"
# Section 1: Command Line Interface
echo -e "\n${BLUE}Testing Command Line Interface${NC}"
test_case "help option" \
"$PROJECT_ROOT/scripts/export_version.sh --help" \
"Usage:" \
test_case "basic execution" \
"" \
"Exporting version information" \
true
test_case "unknown option" \
"$PROJECT_ROOT/scripts/export_version.sh --unknown" \
"Unknown option" \
false
test_section "File Generation"
# Section 2: Version Extraction
echo -e "\n${BLUE}Testing Version Extraction${NC}"
test_case "go version extraction" \
"get_go_version" \
"1.23" \
true
test_case "toolchain version extraction" \
"get_toolchain_version" \
"go1.23.4" \
true
test_case "syspkg version extraction" \
"get_syspkg_version" \
"v0.1.5" \
true
# Section 3: File Generation
echo -e "\n${BLUE}Testing File Generation${NC}"
test_case "version info file creation" \
"$PROJECT_ROOT/scripts/export_version.sh" \
test_case "version info file creation" \
"" \
"Version information has been exported" \
true
test_case "version file format" \
"grep -E '^GO_VERSION=[0-9]+\.[0-9]+$' $PROJECT_ROOT/.version-info" \
"GO_VERSION=1.23" \
test_case "JSON file creation" \
"" \
"exported in JSON format" \
true
test_case "JSON file format" \
"grep -E '\"goVersion\": \"[0-9]+\.[0-9]+\"' $PROJECT_ROOT/.version-info.json" \
"\"goVersion\": \"1.23\"" \
true
test_section "File Contents Validation"
# Section 4: Error Conditions
echo -e "\n${BLUE}Testing Error Conditions${NC}"
test_case "invalid go.mod" \
"GO_MOD_PATH=$TEMP_DIR/go.mod $PROJECT_ROOT/scripts/export_version.sh" \
"Could not read go.mod" \
false
local project_root
project_root=$(get_project_root)
# Test that files exist and contain expected content
test_file_exists "version info file exists" "${project_root}/.version-info"
test_file_exists "JSON version file exists" "${project_root}/.version-info.json"
# Create invalid go.mod for testing
echo "invalid content" > "$TEMP_DIR/go.mod"
test_case "malformed go.mod" \
"GO_MOD_PATH=$TEMP_DIR/go.mod $PROJECT_ROOT/scripts/export_version.sh" \
"Failed to parse version" \
false
test_file_contains "version file contains Go version" \
"${project_root}/.version-info" \
"GO_VERSION="
# Report results
echo
echo "Test Results:"
echo "Passed: $PASS"
echo "Failed: $FAIL"
exit $FAIL
test_file_contains "JSON file contains Go version" \
"${project_root}/.version-info.json" \
'"goVersion":'
}
# Start the test framework and run tests
start_tests "$@"
run_tests

View file

@ -3,178 +3,46 @@
#==============================================================================
# setup_dev_test.sh
#==============================================================================
#
#
# DESCRIPTION:
# Test suite for setup_dev.sh script.
# Validates development environment setup, tool installation, and configuration.
# Test script for setup_dev.sh functionality.
# Validates development environment setup without modifying the actual system.
#
# USAGE:
# ./scripts/tests/setup_dev_test.sh [-v|--verbose] [-s|--skip-install]
# setup_dev_test.sh [OPTIONS]
#
# OPTIONS:
# -v, --verbose Show verbose test output
# -h, --help Show this help message
# -s, --skip-install Skip actual installation tests
# -v, --verbose Enable verbose test output
# --stop-on-failure Stop on first test failure
# -h, --help Show this help message
#
#==============================================================================
# Source the test library
source "$(dirname "$0")/test_lib.sh"
# Source the test framework, exports SCRIPT_PATH
source "$(git rev-parse --show-toplevel)/scripts/tests/test_lib.sh"
# Additional settings
SKIP_INSTALL=false
# Define test functions
run_tests() {
test_section "Help and Usage"
# Parse arguments (handle any unprocessed args from common parser)
while [[ -n "$1" ]]; do
arg="$(parse_common_args "$1")"
case "$arg" in
-s|--skip-install)
SKIP_INSTALL=true
shift
;;
*)
echo "Unknown option: $1"
generate_help "$0"
exit 1
;;
esac
shift
done
# Initialize test environment
setup_test_env
# Get the directory containing this script
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
PROJECT_ROOT="$(dirname "$(dirname "$SCRIPT_DIR")")"
# Create a temporary directory for test files
TEMP_DIR=$(mktemp -d)
trap 'rm -rf "$TEMP_DIR"' EXIT
# Main test case function
function test_case() {
local name=$1
local cmd=$2
local expected_output=$3
local should_succeed=${4:-true}
echo -n "Testing $name... "
# Run command and capture output
local output
if [[ $should_succeed == "true" ]]; then
output=$($cmd 2>&1)
local status=$?
if [[ $status -eq 0 && $output == *"$expected_output"* ]]; then
echo -e "${GREEN}PASS${NC}"
((PASS++))
return 0
fi
else
output=$($cmd 2>&1) || true
if [[ $output == *"$expected_output"* ]]; then
echo -e "${GREEN}PASS${NC}"
((PASS++))
return 0
fi
fi
echo -e "${RED}FAIL${NC}"
echo " Expected output to contain: '$expected_output'"
echo " Got: '$output'"
((FAIL++))
return 0
}
echo "Running setup_dev.sh tests..."
echo "---------------------------"
# Section 1: Command Line Interface
print_section "Testing Command Line Interface"
test_case "help option" \
"$PROJECT_ROOT/scripts/setup_dev.sh --help" \
"Usage:" \
test_case "shows help message" \
"--help" \
"USAGE:" \
true
test_case "unknown option" \
"$PROJECT_ROOT/scripts/setup_dev.sh --unknown" \
test_case "shows error for invalid option" \
"--invalid-option" \
"Unknown option" \
false
# Section 2: Go Environment Check
print_section "Testing Go Environment"
test_case "go installation" \
"command -v go" \
"" \
test_section "Argument Processing"
test_case "accepts verbose flag" \
"--verbose --help" \
"USAGE:" \
true
}
test_case "go version format" \
"go version" \
"go version go1" \
true
test_case "go modules enabled" \
"go env GO111MODULE" \
"on" \
true
# Section 3: Development Tool Installation
print_section "Testing Development Tools"
test_case "doctoc installation check" \
"command -v doctoc" \
"" \
true
test_case "trunk installation check" \
"command -v trunk" \
"" \
true
if [[ "$SKIP_INSTALL" == "false" ]]; then
test_case "doctoc functionality" \
"doctoc --version" \
"doctoc@" \
true
test_case "trunk functionality" \
"trunk --version" \
"trunk" \
true
fi
# Section 4: Project Configuration
print_section "Testing Project Configuration"
test_case "go.mod existence" \
"test -f $PROJECT_ROOT/go.mod" \
"" \
true
test_case "trunk.yaml existence" \
"test -f $PROJECT_ROOT/.trunk/trunk.yaml" \
"" \
true
# Section 5: Error Conditions
print_section "Testing Error Conditions"
test_case "invalid GOPATH" \
"GOPATH=/nonexistent $PROJECT_ROOT/scripts/setup_dev.sh" \
"Invalid GOPATH" \
false
if [[ "$SKIP_INSTALL" == "false" ]]; then
test_case "network failure simulation" \
"SIMULATE_NETWORK_FAILURE=1 $PROJECT_ROOT/scripts/setup_dev.sh" \
"Failed to download" \
false
fi
# Report test results and exit with appropriate status
report_results
# Report results
echo
echo "Test Results:"
echo "Passed: $PASS"
echo "Failed: $FAIL"
exit $FAIL
# Start the test framework and run tests
start_tests "$@"
run_tests

49
scripts/tests/template_test.sh Executable file
View file

@ -0,0 +1,49 @@
#!/bin/bash
#==============================================================================
# <script name>.sh
#==============================================================================
#
# DESCRIPTION:
# Test suite for <script name>.sh. Validates <brief description of what is
# being tested>.
#
# USAGE:
# <script name>.sh [OPTIONS]
#
# OPTIONS:
# -v, --verbose Enable verbose test output
# --stop-on-failure Stop on first test failure
# -h, --help Show this help message
#
#==============================================================================
# Source the test framework, exports SCRIPT_PATH
source "$(git rev-parse --show-toplevel)/scripts/tests/test_lib.sh"
# Define test functions
run_tests() {
  # Template body: replace the <placeholder> names and arguments below
  # with real groups, sections, and test cases for the script under test.

  # Prints "Testing group <test group one name>"
  print_group "<test group one name>"

  # Prints "Testing section <test section 1 name>"
  print_section "<test section 1 name>"
  test_case "<test name 1>" \
    "" \
    "No changes made" \
    false
  test_case "<test name 2>" \
    "with_changes" \
    "" \
    false

  # Prints "Testing section <test section 2 name>"
  print_section "<test section 2 name>"
  test_case "<test name 3>" \
    "" \
    "No changes made" \
    false
}

View file

@ -3,270 +3,463 @@
#==============================================================================
# test_lib.sh
#==============================================================================
#
#
# DESCRIPTION:
# Common testing library for shell script tests.
# Provides standard test framework functions and utilities.
# Common test library providing standardized test framework for bash scripts.
# Provides test execution, assertions, test environment setup, and reporting.
# Implements improved architecture patterns for reliable test execution.
#
# USAGE:
# source "$(dirname "$0")/test_lib.sh"
# # Set up the script path we want to test BEFORE sourcing
# SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
# export SCRIPT_PATH="$SCRIPT_DIR/../script_name.sh"
#
# # Source the test framework
# source "$SCRIPT_DIR/test_lib.sh"
#
# # Define test functions
# run_tests() {
# test_section "Section Name"
# test_case "test name" "args" "expected_output" "should_succeed"
# }
#
# # Start the test framework and run tests
# start_tests "$@"
# run_tests
#
# OPTIONS (inherited from command line):
# -v, --verbose Enable verbose test output
# --stop-on-failure Stop on first test failure
# -h, --help Show this help message
#
# EXPORTS: For use in test scripts.
# - SCRIPT_PATH Path of the script the test is running against
# - TEMP_TEST_DIR Path to the temporary test directory
# - test_case Function to define a test case
# - test_section Function to define test sections
# - test_file_exists Function to test file existence
# - test_file_contains Function to test file contents
#
# FEATURES:
# - Standard test framework
# - Color output
# - Test counting
# - Temporary directory management
# - Command line parsing
# - Help text generation
# - Improved library loading with fallback paths
# - Safe SCRIPT_PATH handling without overriding test settings
# - Arithmetic operations compatible with set -e
# - Proper script name detection for test headers
# - Lazy temporary directory initialization
# - Standardized test case execution and reporting
# - Test environment management with automatic cleanup
# - Comprehensive assertion functions
# - Test statistics and result reporting
#
# ARCHITECTURE IMPROVEMENTS:
# - Library loading uses multiple fallback paths for reliability
# - SCRIPT_PATH variable is preserved from test script initialization
# - Arithmetic increment operations use "|| true" pattern for set -e compatibility
# - Test framework initialization is separated from test execution
# - Temporary directory creation is deferred until actually needed
# - Script name detection iterates through BASH_SOURCE to find actual test script
#
#==============================================================================
# Colors for test output
export GREEN='\033[0;32m'
export RED='\033[0;31m'
export BLUE='\033[0;34m'
export NC='\033[0m' # No Color
export BOLD='\033[1m'
# Source the shared library - get the correct path
# shellcheck source="../lib.sh"
source "$(git rev-parse --show-toplevel)/scripts/lib.sh"
# Test counters
export PASS=0
export FAIL=0
# Test settings
export VERBOSE=${VERBOSE:-false}
export TEMP_DIR
# Print functions
print_header() {
echo -e "\n${BOLD}${1}${NC}\n"
# Initialize temp directory when needed
__init_temp_dir() {
  # Lazily create the shared temporary test directory exactly once.
  # BUGFIX: use ${TEMP_TEST_DIR:-} so the check is safe under `set -u`
  # (nounset) when the variable has never been set.
  if [[ -z ${TEMP_TEST_DIR:-} ]]; then
    TEMP_TEST_DIR="$(create_temp_dir)"
    export TEMP_TEST_DIR
  fi
}
print_section() {
echo -e "\n${BLUE}${1}${NC}"
#==============================================================================
# Test Framework Variables
#==============================================================================
TEST_PASS=0
TEST_FAIL=0
TEST_SKIP=0
TEST_START_TIME=""
# Test configuration
TEST_VERBOSE=${TEST_VERBOSE:-false}
TEST_CONTINUE_ON_FAILURE=${TEST_CONTINUE_ON_FAILURE:-true}
#==============================================================================
# Framework Architecture Notes
#==============================================================================
#
# KEY IMPROVEMENTS IMPLEMENTED:
#
# 1. Library Loading Reliability:
# - Multiple fallback paths for lib.sh loading
# - Works from both project root and scripts/ directory
# - Provides clear error messages if lib.sh cannot be found
#
# 2. Variable Management:
# - SCRIPT_PATH is preserved from test script initialization
# - Only initializes variables if not already set
# - Prevents test framework from overriding test script settings
#
# 3. Arithmetic Operations:
# - All increment operations use "|| true" pattern
# - Compatible with bash "set -e" error handling
# - Prevents premature script termination on arithmetic operations
#
# 4. Script Name Detection:
# - Iterates through BASH_SOURCE array to find actual test script
# - Skips test_lib.sh to show correct script name in headers
# - Provides accurate test identification in output
#
# 5. Resource Management:
# - Lazy initialization of temporary directories
# - Only creates temp resources when actually needed
# - Proper cleanup handling with trap functions
#
# 6. Test Organization:
# - Function-based test structure (run_tests pattern)
# - Clear separation of framework initialization and test execution
# - Standardized test case and section patterns
#
#==============================================================================
# Test Environment Setup
#==============================================================================
__setup_test_env() {
  # Initialize the per-run test environment: record the suite start time,
  # provision the shared temp directory, and install the EXIT trap that
  # reports results and cleans up on any exit path.
  TEST_START_TIME=$(date +%s)
  __init_temp_dir
  # Installed after the temp dir exists so the cleanup handler never sees
  # a half-initialized environment.
  trap '__cleanup_test_env' EXIT
  log_debug "Test environment setup complete"
  log_debug "Temporary directory: ${TEMP_TEST_DIR}"
}
print_info() {
[[ "$VERBOSE" == "true" ]] && echo "INFO: $1"
__cleanup_test_env() {
  # EXIT-trap handler: report results, remove the temp directory, and
  # re-exit with the status the script was already exiting with.
  local exit_code=$?  # must be the first statement so the status is preserved
  __report_results
  # NOTE(review): TEMP_TEST_DIR is referenced unguarded; under `set -u`
  # this is only safe if __init_temp_dir always ran first — confirm.
  if [[ -n ${TEMP_TEST_DIR} && -d ${TEMP_TEST_DIR} ]]; then
    safe_remove "${TEMP_TEST_DIR}"
    log_debug "Test environment cleanup complete"
  fi
  exit "${exit_code}"
}
# Main test case function
__setup() {
  # Framework bootstrap: parse shared CLI flags, print the suite header,
  # and initialize the test environment.
  __parse_test_args "$@"

  # Walk the call stack (skipping test_lib.sh itself) to find the actual
  # test script that sourced this library, so the header names the right
  # file rather than the framework.
  local script_name=""
  for ((i = 1; i < ${#BASH_SOURCE[@]}; i++)); do
    if [[ ${BASH_SOURCE[i]} != *"test_lib.sh" ]]; then
      script_name=$(basename "${BASH_SOURCE[i]}")
      break
    fi
  done

  print_header "Running ${script_name} tests"
  echo ""
  __setup_test_env
}
#==============================================================================
# Test Execution Functions
#==============================================================================
test_case() {
  # Execute "${SCRIPT_PATH} ${args}" and verify exit status and output:
  # should_succeed=true  -> expect exit 0 AND output containing expected_output
  # should_succeed=false -> expect non-zero exit AND output containing it
  # BUGFIX: the old test_case implementation was interleaved into this body
  # by the diff; the legacy lines have been removed to restore the new one.
  local name="$1"
  local args="$2"
  local expected_output="$3"
  local should_succeed="${4:-true}"

  # Disable exit-on-error so a failing command under test cannot abort
  # the whole suite.
  set +e

  # Support shorthand: test_case "name" "args" "true|false" (no expected_output)
  if [[ -z ${expected_output} && (${should_succeed} == "true" || ${should_succeed} == "false") ]]; then
    expected_output=""
  fi

  echo -n "* ${name}... "
  [[ ${TEST_VERBOSE} == true ]] && echo -n "(${COMMAND} ${args}) "

  local output
  local exit_code=0
  # Capture both stdout and stderr, ensuring we don't exit on command failure
  local cmd="${SCRIPT_PATH} ${args}"
  if [[ ${should_succeed} == "true" ]]; then
    # For tests that should succeed
    output=$(eval "${cmd}" 2>&1)
    exit_code=$?
    if [[ ${exit_code} -eq 0 && ${output} == *"${expected_output}"* ]]; then
      __test_pass "${name}"
    else
      __test_fail "${name}" "Success with output containing '${expected_output}'" "Exit code ${exit_code} with output: '${output}'"
    fi
  else
    # For tests that should fail
    output=$(eval "${cmd}" 2>&1)
    exit_code=$?
    if [[ ${exit_code} -ne 0 && ${output} == *"${expected_output}"* ]]; then
      __test_pass "${name}"
    else
      __test_fail "${name}" "Failure with output containing '${expected_output}'" "Exit code ${exit_code} with output: '${output}'"
    fi
  fi

  set -e # Restore exit-on-error
}
# Help text generation
generate_help() {
local script_path=$1
sed -n '/^# DESCRIPTION:/,/^#===/p' "$script_path" | sed 's/^# \?//'
__test_pass() {
  # Record a passing test: print PASS, bump the counter, optionally log.
  local name="$1"
  echo -e "${GREEN}PASS${NC}"
  ((TEST_PASS++)) || true
  # BUGFIX: guard with `if` so the function's return status is always 0.
  # The previous bare `[[ ... ]] && log_debug` form made the function
  # return 1 whenever verbose logging was disabled (SC2015), which trips
  # callers running under `set -e`.
  if [[ ${TEST_VERBOSE} == true ]]; then
    log_debug "Test passed: ${name}"
  fi
}
# Standard argument parsing
parse_common_args() {
while [[ $# -gt 0 ]]; do
case $1 in
-v|--verbose)
VERBOSE=true
shift
;;
-h|--help)
generate_help "$0"
exit 0
;;
*)
# Return the unhandled argument
echo "$1"
;;
esac
shift
done
__test_fail() {
  # Record a test failure: print FAIL, bump the failure counter, emit the
  # expected/actual detail lines when provided, and stop the whole run
  # when TEST_CONTINUE_ON_FAILURE is disabled.
  local name="$1"
  local want="$2"
  local got="$3"

  echo -e "${RED}FAIL${NC}"
  ((TEST_FAIL++)) || true

  if [[ -n ${want} ]]; then
    echo " Expected : ${want}"
  fi
  if [[ -n ${got} ]]; then
    echo " Actual : ${got}"
  fi

  if [[ ${TEST_CONTINUE_ON_FAILURE} != true ]]; then
    __report_results
    exit 1
  fi
}
# Results reporting

# Prints a summary of pass/fail counts for the simple test runner.
# (Bug fix: the closing brace was missing; the banner comments below had
# been swallowed into the function body.)
# Globals: PASS, FAIL (read)
# Returns: 0 if no tests failed, non-zero otherwise.
report_results() {
  echo
  echo "Test Results:"
  echo "------------"
  echo "Passed: $PASS"
  echo "Failed: $FAIL"
  echo "Total: $((PASS + FAIL))"
  # Return non-zero if any tests failed
  [[ $FAIL -eq 0 ]]
}

#==============================================================================
# Advanced Test Functions
#==============================================================================
# Asserts that a file exists at the given path, recording a pass or fail.
# Arguments: $1 - test name, $2 - path expected to exist
test_file_exists() {
  local test_name="$1"
  local target="$2"
  echo -n "* ${test_name}... "
  set +e
  if ! file_exists "${target}"; then
    __test_fail "${test_name}" "File should exist: file_path='${target}'" "File does not exist"
  else
    __test_pass "${test_name}"
  fi
  set -e
}
# File operation helpers

# Asserts that a file exists and contains the expected content (as a grep
# pattern), recording a pass or fail.
# (Bug fix: after failing the missing-file check the function fell through
# and ran grep/cat on the nonexistent file, erroring and double-counting
# the failure; it now returns immediately.)
# Arguments: $1 - test name, $2 - file path, $3 - content/pattern expected
test_file_contains() {
  local name="$1"
  local file_path="$2"
  local expected_content="$3"
  echo -n "* ${name}... "
  if ! file_exists "${file_path}"; then
    __test_fail "${name}" "File should exist and contain '${expected_content}'" "File does not exist: ${file_path}"
    return
  fi
  if grep -q "${expected_content}" "${file_path}"; then
    __test_pass "${name}"
  else
    local file_content
    file_content=$(cat "${file_path}")
    __test_fail "${name}" "File should contain '${expected_content}'" "File content: ${file_content}"
  fi
}
#==============================================================================
# Test Utilities
#==============================================================================

# Creates a test file with the given content and permissions, creating parent
# directories as needed.
# (Bug fix: a bad merge had left two duplicate copies of this body
# interleaved — one using mkdir/print_info, one using ensure_dir/log_debug;
# this keeps the single current implementation.)
# Arguments: $1 - file path, $2 - content, $3 - octal mode (default 644)
create_test_file() {
  local file_path="$1"
  local content="$2"
  local mode="${3:-644}"
  local dir_path
  dir_path=$(dirname "${file_path}")
  ensure_dir "${dir_path}"
  echo "${content}" >"${file_path}"
  chmod "${mode}" "${file_path}"
  log_debug "Created test file: ${file_path}"
}
# Creates a test directory with the given permissions.
# (Bug fix: the closing brace was missing, swallowing the section banner and
# test_section into this function's body.)
# Arguments: $1 - directory path, $2 - octal mode (default 755)
create_test_dir() {
  local dir="$1"
  local mode="${2:-755}"
  mkdir -p "$dir"
  chmod "$mode" "$dir"
  print_info "Created test directory: $dir"
}

#==============================================================================
# Test Organization Helpers
#==============================================================================

# Prints a banner for a named section of tests.
# Arguments: $1 - section name
test_section() {
  local section_name="$1"
  print_section "Testing section: ${section_name}"
}
# Asserts that a file contains the given grep pattern; prints diagnostics
# (including the file's content) and returns 1 on mismatch.
# (Bug fix: the success `return 0` and closing brace were missing, which
# made test_group a nested definition inside this function.)
# Arguments: $1 - file, $2 - pattern, $3 - optional failure message
assert_file_contains() {
  local file="$1"
  local pattern="$2"
  local message="${3:-File does not contain expected content}"
  if ! grep -q "$pattern" "$file"; then
    echo -e "${RED}FAIL${NC}: $message"
    echo "  File: $file"
    echo "  Expected pattern: $pattern"
    echo "  Content:"
    cat "$file"
    return 1
  fi
  return 0
}

# Prints a banner for a named group of tests.
# Arguments: $1 - group name
test_group() {
  local group_name="$1"
  echo_color cyan "Testing group: ${group_name}"
  echo ""
}
#==============================================================================
# Test Reporting
#==============================================================================

# Prints the final pass/fail/skip summary with elapsed time.
# (Bug fix: the failure else-branch had been displaced elsewhere by a bad
# merge, leaving the trailing if unterminated; it is restored here.)
# Globals: TEST_START_TIME, TEST_PASS, TEST_FAIL, TEST_SKIP (read)
# Returns: 0 when no tests failed, 1 otherwise.
__report_results() {
  local end_time
  end_time=$(date +%s)
  local duration=$((end_time - TEST_START_TIME))
  echo
  print_section "Test Results Summary"
  echo "Duration : ${duration}s"
  echo "Total : $((TEST_PASS + TEST_FAIL))"
  if [[ ${TEST_PASS} -gt 0 ]]; then
    echo -e "Passed : ${GREEN}${TEST_PASS}${NC}"
  else
    echo -e "Passed : ${TEST_PASS}"
  fi
  if [[ ${TEST_FAIL} -gt 0 ]]; then
    echo -e "Failed : ${RED}${TEST_FAIL}${NC}"
  else
    echo -e "Failed : ${TEST_FAIL}"
  fi
  if [[ ${TEST_SKIP} -gt 0 ]]; then
    echo -e "Skipped : ${YELLOW}${TEST_SKIP}${NC}"
  fi
  echo
  if [[ ${TEST_FAIL} -eq 0 ]]; then
    log_success "All tests passed!"
    return 0
  else
    log_error "${TEST_FAIL} test(s) failed"
    return 1
  fi
}
# Checks that a regular file exists; on failure prints a diagnostic and
# returns 1 so callers can chain it into assertions.
# Arguments: $1 - file path, $2 - optional failure message
assert_file_exists() {
  local path="$1"
  local msg="${2:-File does not exist}"
  [[ -f "$path" ]] && return 0
  echo -e "${RED}FAIL${NC}: $msg"
  echo "  Expected file: $path"
  return 1
}
# Checks that a directory exists; prints a diagnostic and returns 1 if not.
# Arguments: $1 - directory path, $2 - optional failure message
assert_dir_exists() {
  local path="$1"
  local msg="${2:-Directory does not exist}"
  if [[ -d "$path" ]]; then
    return 0
  fi
  echo -e "${RED}FAIL${NC}: $msg"
  echo "  Expected directory: $path"
  return 1
}
# Returns 0 when the named command is resolvable on PATH, non-zero otherwise.
is_command_available() {
  command -v "$1" &>/dev/null
}
# Polls a command string until it succeeds or the timeout elapses.
# (Bug fix: a stray else/fi block belonging to __report_results had been
# spliced into this function by a bad merge; on timeout the function simply
# returns 1.)
# Arguments: $1 - command string (eval'd each attempt),
#            $2 - timeout in seconds (default 10),
#            $3 - poll interval in seconds (default 1)
# Returns: 0 when the condition succeeded, 1 on timeout.
wait_for_condition() {
  local cmd="$1"
  local timeout="${2:-10}"
  local interval="${3:-1}"
  local end_time=$((SECONDS + timeout))
  while [[ $SECONDS -lt $end_time ]]; do
    if eval "$cmd"; then
      return 0
    fi
    sleep "$interval"
  done
  return 1
}
# Prints a SKIP notice and returns 0 when the required command is missing;
# returns 1 when the command is available (i.e. do not skip).
# (Bug fix: the function was truncated mid-if by a bad merge; the closing
# fi, the non-skip return and the closing brace are restored.)
# Arguments: $1 - command name, $2 - optional skip message
skip_if_command_missing() {
  local cmd="$1"
  local message="${2:-Required command not available}"
  if ! is_command_available "$cmd"; then
    echo "SKIP: $message (missing: $cmd)"
    return 0
  fi
  return 1
}

#==============================================================================
# Common Test Patterns
#==============================================================================

# Runs the standard -h/--help test cases against a script.
# Arguments: $1 - path to the script under test
__test_help_option() {
  local script_path="$1"
  local script_name
  script_name=$(basename "${script_path}")
  test_case "help option (-h)" \
    "${script_path} -h" \
    "USAGE" \
    true
  test_case "help option (--help)" \
    "${script_path} --help" \
    "USAGE" \
    true
}
# Verifies the script under test rejects an unknown option with an error.
# Arguments: $1 - path to the script under test
__test_invalid_arguments() {
  local target_script="$1"
  test_case "invalid option" "${target_script} --invalid-option" "error" false
}
#==============================================================================
# Initialization
#==============================================================================

# Default test argument parsing: consumes leading framework flags and stops
# at the first argument it does not recognize (left for the caller).
# Globals: TEST_VERBOSE, VERBOSE, TEST_CONTINUE_ON_FAILURE (exported)
__parse_test_args() {
  local arg
  while [[ $# -gt 0 ]]; do
    arg="$1"
    if [[ $arg == "-v" || $arg == "--verbose" ]]; then
      export TEST_VERBOSE=true
      export VERBOSE=true
    elif [[ $arg == "--stop-on-failure" ]]; then
      export TEST_CONTINUE_ON_FAILURE=false
    elif [[ $arg == "-h" || $arg == "--help" ]]; then
      [[ $(type -t show_help) == function ]] && show_help
      exit 0
    else
      # Unrecognized argument: leave it (and everything after) unconsumed.
      break
    fi
    shift
  done
}
# Function to start the testing framework: forwards all arguments to __setup.
start_tests() {
  __setup "$@"
}
# Guard: this library must be sourced so its functions land in the caller's
# shell; refuse to run when executed as a standalone script.
if [[ ${BASH_SOURCE[0]} == "${0}" ]]; then
echo "This script should be sourced, not executed directly."
# shellcheck disable=SC2016
echo 'Usage: source "$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"/test_lib.sh'
exit 1
fi
# Derives the path of the script under test from the calling *_test.sh file
# name (foo_test.sh -> <root>/scripts/foo.sh). Falls back to searching the
# scripts/ directory, and aborts via fail() when nothing matches.
# (Bug fix: a bad merge had spliced run_if_exists into the middle of this
# function and stranded a return/brace inside the BASH_SOURCE loop; both
# functions are reconstructed as standalone definitions.)
# Outputs: the resolved script path on stdout.
__get_script_path_dynamic() {
  local test_filepath
  local root_path
  local script_filename
  local script_filepath
  # Try multiple ways of locating the test script to support different
  # invocation styles. Find the calling test script (first BASH_SOURCE
  # entry that ends with _test.sh).
  test_filepath=""
  for ((i = 1; i < ${#BASH_SOURCE[@]}; i++)); do
    if [[ ${BASH_SOURCE[i]} == *_test.sh ]]; then
      test_filepath="${BASH_SOURCE[i]}"
      break
    fi
  done
  # Fallbacks if not found
  test_filepath="${test_filepath:-${BASH_SOURCE[1]:-${BASH_SOURCE[0]:-$0}}}"
  root_path="$(get_project_root 2>/dev/null || pwd)"
  [[ ${TEST_VERBOSE} == true ]] && echo "DEBUG: test_filepath=${test_filepath} root_path=${root_path}" >&2
  script_filename="$(basename "${test_filepath}" | sed 's/_test.sh/.sh/g')"
  script_filepath="${root_path}/scripts/${script_filename}"
  if [[ -f ${script_filepath} ]]; then
    log_debug "Script path successfully found dynamically ${script_filepath}"
    echo "${script_filepath}"
    return 0
  fi
  # Fallback: search scripts/ for a matching script name
  if [[ -d "${root_path}/scripts" ]]; then
    local found
    found=$(find "${root_path}/scripts" -maxdepth 1 -type f -name "${script_filename}" -print -quit 2>/dev/null || true)
    if [[ -n ${found} ]]; then
      log_debug "Script path found via fallback: ${found}"
      echo "${found}"
      return 0
    fi
  fi
  fail "Script file not found: ${script_filepath}; set SCRIPT_PATH before sourcing test_lib.sh"
}

# Runs the given command when it exists on PATH, otherwise evals a fallback.
# Arguments: $1 - command name, $2 - fallback command string
run_if_exists() {
  local cmd="$1"
  local fallback="$2"
  if is_command_available "$cmd"; then
    "$cmd"
  else
    eval "$fallback"
  fi
}
# Backs up a file (to <file>.bak) and arranges for it to be restored when the
# shell exits.
# Arguments: $1 - file to protect
backup_and_restore() {
  local file="$1"
  if [[ -f "$file" ]]; then
    cp "$file" "${file}.bak"
    print_info "Backed up: $file"
    # Bug fix: expand $file now (double quotes). The previous single-quoted
    # trap deferred expansion until the EXIT trap fired, when the local
    # variable no longer existed, so nothing was ever restored (SC2064).
    trap "restore_backup '$file'" EXIT
  fi
}
# Moves <file>.bak back over the original file if a backup exists.
# Arguments: $1 - file to restore
restore_backup() {
  local target="$1"
  local backup="${target}.bak"
  if [[ -f "$backup" ]]; then
    mv "$backup" "$target"
    print_info "Restored: $target"
  fi
}
# Verifies that every named command is available on PATH, reporting each
# missing one.
# Returns: the number of missing dependencies (0 = all present).
check_dependencies() {
  local missing=0
  local dep
  for dep in "$@"; do
    if ! is_command_available "$dep"; then
      echo "Missing required dependency: $dep"
      missing=$((missing + 1))
    fi
  done
  return $missing
}
# Will be set by the test script - only initialize if not already set.
# Bug fix: use :- expansion so these guards do not blow up under `set -u`
# when the variables are unset.
[[ -z ${SCRIPT_PATH:-} ]] && SCRIPT_PATH="$(__get_script_path_dynamic)"
export SCRIPT_PATH
[[ -z ${TEMP_TEST_DIR:-} ]] && TEMP_TEST_DIR=""
export TEMP_TEST_DIR

View file

@ -1,227 +0,0 @@
#!/bin/bash
#==============================================================================
# update_md_tocs_test.sh
#==============================================================================
#
# DESCRIPTION:
# Test suite for update_md_tocs.sh script.
# Validates Table of Contents generation, markdown file handling,
# and doctoc integration.
#
# USAGE:
# ./scripts/tests/update_md_tocs_test.sh [-v|--verbose] [-s|--skip-doctoc]
#
# OPTIONS:
# -v, --verbose Show verbose test output
# -s, --skip-doctoc Skip tests requiring doctoc installation
# -h, --help Show this help message
#
#==============================================================================
# Colors for test output (ANSI escape sequences; NC resets).
GREEN='\033[0;32m'
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Test settings
VERBOSE=false
SKIP_DOCTOC=false
# Running pass/fail counters, reported (and used as exit status) at the end.
PASS=0
FAIL=0
# Help message: prints this file's header block (DESCRIPTION through the
# #=== rule) with the leading comment markers stripped.
show_help() {
sed -n '/^# DESCRIPTION:/,/^#===/p' "$0" | sed 's/^# \?//'
}
# Parse command line arguments
while [[ $# -gt 0 ]]; do
case $1 in
-v|--verbose)
VERBOSE=true
shift
;;
-s|--skip-doctoc)
# Allows running the suite on machines without doctoc installed.
SKIP_DOCTOC=true
shift
;;
-h|--help)
show_help
exit 0
;;
*)
echo "Unknown option: $1"
show_help
exit 1
;;
esac
done
# Get the directory containing this script
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
PROJECT_ROOT="$(dirname "$(dirname "$SCRIPT_DIR")")"
# Create a temporary directory for test files
# (removed automatically on exit via the trap below).
TEMP_DIR=$(mktemp -d)
trap 'rm -rf "$TEMP_DIR"' EXIT
# Test helper functions

# Writes a minimal markdown document containing doctoc START/END markers and
# a few nested headings, for exercising TOC generation.
# Arguments: $1 - destination file path
create_test_md() {
local file="$1"
cat > "$file" << EOF
# Test Document
<!-- START doctoc -->
<!-- END doctoc -->
## Section 1
### Subsection 1.1
### Subsection 1.2
## Section 2
### Subsection 2.1
EOF
}
# Main test case function

# Runs a single test: executes a command string and checks that its output
# contains an expected substring (and, for should-succeed tests, that it
# exits 0). Always returns 0; results go into the PASS/FAIL counters.
# Arguments: $1 - test name, $2 - command string, $3 - substring expected in
#            output, $4 - "true" if the command must also exit 0 (default)
function test_case() {
  local name=$1
  local cmd=$2
  local expected_output=$3
  local should_succeed=${4:-true}
  echo -n "Testing $name... "
  # Run command and capture output. Bug fix: use eval so quoting inside the
  # command string (the callers embed single-quoted paths) is honored; a
  # plain unquoted $cmd expansion passed the quote characters through
  # literally as part of the arguments.
  local output
  if [[ $should_succeed == "true" ]]; then
    output=$(eval "$cmd" 2>&1)
    local status=$?
    if [[ $status -eq 0 && $output == *"$expected_output"* ]]; then
      echo -e "${GREEN}PASS${NC}"
      ((PASS++))
      return 0
    fi
  else
    output=$(eval "$cmd" 2>&1) || true
    if [[ $output == *"$expected_output"* ]]; then
      echo -e "${GREEN}PASS${NC}"
      ((PASS++))
      return 0
    fi
  fi
  echo -e "${RED}FAIL${NC}"
  echo "  Expected output to contain: '$expected_output'"
  echo "  Got: '$output'"
  ((FAIL++))
  return 0
}
echo "Running update_md_tocs.sh tests..."
echo "---------------------------------"
# Section 1: Command Line Interface
echo -e "\n${BLUE}Testing Command Line Interface${NC}"
test_case "help option" \
"$PROJECT_ROOT/scripts/update_md_tocs.sh --help" \
"Usage:" \
true
test_case "unknown option" \
"$PROJECT_ROOT/scripts/update_md_tocs.sh --unknown" \
"Unknown option" \
false
# Section 2: Basic TOC Generation
echo -e "\n${BLUE}Testing Basic TOC Generation${NC}"
create_test_md "$TEMP_DIR/test.md"
# These cases need doctoc on PATH; skipped with -s/--skip-doctoc.
if [[ "$SKIP_DOCTOC" == "false" ]]; then
test_case "doctoc installation" \
"command -v doctoc" \
"" \
true
test_case "TOC generation" \
"doctoc '$TEMP_DIR/test.md'" \
"Table of Contents" \
true
test_case "TOC structure" \
"grep -A 5 'Table of Contents' '$TEMP_DIR/test.md'" \
"Section 1" \
true
fi
# Section 3: Multiple File Handling
echo -e "\n${BLUE}Testing Multiple File Handling${NC}"
create_test_md "$TEMP_DIR/doc1.md"
create_test_md "$TEMP_DIR/doc2.md"
test_case "multiple file update" \
"$PROJECT_ROOT/scripts/update_md_tocs.sh '$TEMP_DIR/doc1.md' '$TEMP_DIR/doc2.md'" \
"updated" \
true
# Section 4: Special Cases
echo -e "\n${BLUE}Testing Special Cases${NC}"
# Create file without TOC markers
cat > "$TEMP_DIR/no_toc.md" << EOF
# Document
## Section 1
## Section 2
EOF
test_case "file without TOC markers" \
"$PROJECT_ROOT/scripts/update_md_tocs.sh '$TEMP_DIR/no_toc.md'" \
"No TOC markers" \
false
# Create empty file
touch "$TEMP_DIR/empty.md"
test_case "empty file handling" \
"$PROJECT_ROOT/scripts/update_md_tocs.sh '$TEMP_DIR/empty.md'" \
"Empty file" \
false
# Section 5: Error Conditions
echo -e "\n${BLUE}Testing Error Conditions${NC}"
test_case "nonexistent file" \
"$PROJECT_ROOT/scripts/update_md_tocs.sh nonexistent.md" \
"No such file" \
false
test_case "directory as input" \
"$PROJECT_ROOT/scripts/update_md_tocs.sh '$TEMP_DIR'" \
"Is a directory" \
false
# Create unreadable file (mode 000), then restore sane permissions afterwards
# so the EXIT-trap cleanup can delete it.
touch "$TEMP_DIR/unreadable.md"
chmod 000 "$TEMP_DIR/unreadable.md"
test_case "unreadable file" \
"$PROJECT_ROOT/scripts/update_md_tocs.sh '$TEMP_DIR/unreadable.md'" \
"Permission denied" \
false
chmod 644 "$TEMP_DIR/unreadable.md"
# Create file with invalid markdown
cat > "$TEMP_DIR/invalid.md" << EOF
# [Invalid Markdown)
* Broken list
EOF
test_case "invalid markdown handling" \
"$PROJECT_ROOT/scripts/update_md_tocs.sh '$TEMP_DIR/invalid.md'" \
"Invalid markdown" \
false
# Report results. Exit status is the failure count (0 = success).
echo
echo "Test Results:"
echo "Passed: $PASS"
echo "Failed: $FAIL"
exit $FAIL

View file

@ -3,14 +3,14 @@
#==============================================================================
# update_md_tocs.sh
#==============================================================================
#
#
# DESCRIPTION:
# Automatically updates table of contents in all markdown files that contain
# doctoc markers. The script handles installation of doctoc if not present
# and applies consistent formatting across all markdown files.
#
# USAGE:
#   update_md_tocs.sh [OPTIONS]
#
# FEATURES:
# - Auto-detects markdown files with doctoc markers
@ -37,53 +37,37 @@
# 0 - Success
# 1 - Missing dependencies or installation failure
#
# AUTHOR:
# Claude - 2025-08-28
#
# NOTES:
# - Only processes files containing doctoc markers
# - Preserves existing markdown content
# - Safe to run multiple times
#==============================================================================
# Function to check if a command exists on PATH.
command_exists() {
  command -v "$1" &>/dev/null
}
# Function to check if an npm package is installed globally.
npm_package_installed() {
  npm list -g "$1" &>/dev/null
}
source "$(git rev-parse --show-toplevel)/scripts/lib.sh"
# Install doctoc if not present.
# (Bug fix: the old and new versions of this section had been merged together
# by a bad diff, duplicating the install logic, the status lines, and
# running doctoc twice per file; this is the single current version.)
if ! command_exists doctoc; then
  echo "doctoc not found. Installing..."
  if ! command_exists npm; then
    echo "Error: npm is required to install doctoc"
    exit 1
  fi
  if ! npm_package_installed doctoc; then
    echo "Installing doctoc globally..."
    if ! npm install -g doctoc; then
      fail "Failed to install doctoc"
    fi
  fi
fi

print_status "Updating table of contents in markdown files..."

# Find all markdown files that contain doctoc markers and refresh each TOC
# in place; a per-file failure is logged but does not abort the run.
find . -type f -name "*.md" -exec grep -l "START doctoc" {} \; | while read -r file; do
  log_info "Processing: ${file}"
  if ! doctoc --maxlevel 4 --no-title --notitle --github "${file}"; then
    log_error "Failed to update TOC in ${file}"
  fi
done

print_success "Table of contents update complete!"

23
scripts/update_trunkio.sh Executable file
View file

@ -0,0 +1,23 @@
#!/bin/bash
#==============================================================================
# update_trunkio.sh
#==============================================================================
#
# DESCRIPTION:
# Configures and updates the TrunkIO extension.
#
# USAGE:
# update_trunkio.sh
#==============================================================================
# Shared helpers (fail/log_*/print_* etc.) from the repo's script library.
source "$(git rev-parse --show-toplevel)/scripts/lib.sh"
# Upgrade the trunk CLI/plugins, then run `trunk check list` with autofix and
# failure reporting enabled.
trunk upgrade
trunk check list --fix --print-failures
# TODO: Automatically enable any disabled linters except for cspell
# DISABLED_LINTERS="$(trunk check list | grep '◯' | grep "files" | awk -F ' ' '{print $2}')"
# for linter in $DISABLED_LINTERS; do
# echo "trunk check enable $linter;"
# done

View file

@ -1,70 +0,0 @@
#!/bin/bash
# Script to export Go library version information for package development
# This script reads version information from go.mod and exports it
# Abort on the first failing command.
set -e
# Get the directory containing this script
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
# Project root is one level above the scripts/ directory.
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
# Function to extract the Go language version from go.mod.
# Globals: PROJECT_ROOT (read)
# Outputs: the version string (e.g. "1.23.5") on stdout.
get_go_version() {
  local version
  version=$(awk '/^go / {print $2}' "$PROJECT_ROOT/go.mod")
  echo "$version"
}
# Function to extract the toolchain version from go.mod.
# Globals: PROJECT_ROOT (read)
# Outputs: the toolchain string (e.g. "go1.23.5") on stdout.
get_toolchain_version() {
  local version
  version=$(awk '/^toolchain / {print $2}' "$PROJECT_ROOT/go.mod")
  echo "$version"
}
# Function to extract the pinned syspkg module version from go.mod.
# Globals: PROJECT_ROOT (read)
# Outputs: the second field of the matching line (e.g. "v1.2.3") on stdout.
get_syspkg_version() {
  local version
  version=$(awk -v pat="github.com/awalsh128/syspkg" '$0 ~ pat {print $2}' "$PROJECT_ROOT/go.mod")
  echo "$version"
}
# Main execution
echo "Exporting version information..."
GO_VERSION=$(get_go_version)
TOOLCHAIN_VERSION=$(get_toolchain_version)
SYSPKG_VERSION=$(get_syspkg_version)
# Export versions as environment variables
export GO_VERSION
export TOOLCHAIN_VERSION
export SYSPKG_VERSION
# Create a version info file (shell-style KEY=VALUE lines).
VERSION_FILE="$PROJECT_ROOT/.version-info"
cat > "$VERSION_FILE" << EOF
# Version information for cache-apt-pkgs-action
GO_VERSION=$GO_VERSION
TOOLCHAIN_VERSION=$TOOLCHAIN_VERSION
SYSPKG_VERSION=$SYSPKG_VERSION
EXPORT_DATE=$(date '+%Y-%m-%d %H:%M:%S')
EOF
echo "Version information has been exported to $VERSION_FILE"
echo "Go Version: $GO_VERSION"
echo "Toolchain Version: $TOOLCHAIN_VERSION"
echo "Syspkg Version: $SYSPKG_VERSION"
# Also create a JSON format for tools that prefer it
VERSION_JSON="$PROJECT_ROOT/.version-info.json"
cat > "$VERSION_JSON" << EOF
{
"goVersion": "$GO_VERSION",
"toolchainVersion": "$TOOLCHAIN_VERSION",
"syspkgVersion": "$SYSPKG_VERSION",
"exportDate": "$(date '+%Y-%m-%d %H:%M:%S')"
}
EOF
echo "Version information also exported in JSON format to $VERSION_JSON"