Adding arm based benchmark tests #1654

Open
wants to merge 8 commits into main
7 changes: 5 additions & 2 deletions .github/template/generate_page/action.yml
@@ -11,6 +11,9 @@ inputs:
TOKEN:
required: true
description: "Token for checkin"
+MACHINE:
+required: true
+description: "Type of machine"

runs:
using: "composite"
@@ -42,7 +45,7 @@ runs:
auto-push: true
comment-on-alert: true
gh-pages-branch: benchmarks
-benchmark-data-dir-path: ${{ inputs.TYPE }}/bandwidth/${{ inputs.TEST }}
+benchmark-data-dir-path: ${{ inputs.MACHINE }}/${{ inputs.TYPE }}/bandwidth/${{ inputs.TEST }}

# Push the latency results and publish the graphs
- name: "Update Latency Results : ${{ inputs.TEST }}"
@@ -57,7 +60,7 @@ runs:
auto-push: true
comment-on-alert: true
gh-pages-branch: benchmarks
-benchmark-data-dir-path: ${{ inputs.TYPE }}/latency/${{ inputs.TEST }}
+benchmark-data-dir-path: ${{ inputs.MACHINE }}/${{ inputs.TYPE }}/latency/${{ inputs.TEST }}
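
Note: the new required MACHINE input namespaces the published benchmark data, so results from different machine types no longer overwrite each other. A minimal sketch of how a caller would now invoke this composite action (the "arm64" label and the use of secrets.GITHUB_TOKEN are assumptions for illustration, not part of this diff):

- name: "Read Test"
  uses: "./.github/template/generate_page"
  with:
    MACHINE: "arm64"                     # assumed machine label for the new ARM runners
    TEST: "read"
    TYPE: "standard"
    TOKEN: ${{ secrets.GITHUB_TOKEN }}

With these values the results are published to the benchmarks branch under arm64/standard/bandwidth/read and arm64/standard/latency/read.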



286 changes: 286 additions & 0 deletions .github/template/perftesting/action.yml
@@ -0,0 +1,286 @@
name: perftesting
description: "Execute perf testing scripts and generate the pages"

inputs:
MACHINE:
required: true
description: "Type of machine"
STANDARD_ACCOUNT:
required: true
description: "Standard Storage Account"
STANDARD_KEY:
required: true
description: "Standard Storage Account Key"
PREMIUM_ACCOUNT:
required: true
description: "Premium Storage Account"
PREMIUM_KEY:
required: true
description: "Premium Storage Account Key"
STANDARD_HNS_ACCOUNT:
required: true
description: "Standard HNS Storage Account"
STANDARD_HNS_KEY:
required: true
description: "Standard HNS Storage Account Key"
PREMIUM_HNS_ACCOUNT:
required: true
description: "Premium HNS Storage Account"
PREMIUM_HNS_KEY:
required: true
description: "Premium HNS Storage Account Key"
BENCH_CONTAINER:
required: true
description: "Container for benchmarking"
GITHUB_TOKEN:
required: true
description: "GitHub Token"

runs:
using: "composite"

steps:
# Print the host info
- name: 'Host info'
shell: bash
run: hostnamectl

# Install Fuse3
- name: "Install Fuse3"
shell: bash
run: |
sleep 30
sudo systemctl stop apt-daily.timer || true
echo "Add Lock Timeout for apt package manager"
sudo sh -c 'echo "DPkg::Lock::Timeout \"120\";" > /etc/apt/apt.conf.d/99timeout'
sudo ps -aux | grep -iE "apt"
sudo killall apt apt-get || true
yes | sudo fuser -vik -TERM /var/lib/dpkg/lock /var/lib/dpkg/lock-frontend /var/lib/apt/lists/lock || true
echo "Released any lock if some other process has acquired"
sudo dpkg --configure -a
echo "Starting Updates and Installation of Packages"
sudo apt-get update --fix-missing
sudo apt-get install fuse3 libfuse3-dev gcc -y

# Install Tools
- name: "Install Tools"
shell: bash
run: |
sudo apt-get install fio jq python3 -y

# Install GoLang
- name: "Install Go"
shell: bash
run: |
./go_installer.sh ../
go version

# Build Blobfuse2
- name: "Build Blobfuse2"
shell: bash
run: |
./build.sh

# Run binary and validate the version
- name: "Validate Version"
shell: bash
run: |
sudo cp ./blobfuse2 /usr/bin/
which blobfuse2
blobfuse2 --version

- name: "Create Env variables for account name and key"
shell: bash
run: |
if [ "${{ matrix.TestType }}" == "standard" ]; then
echo "Create standard account env"
echo "AZURE_STORAGE_ACCOUNT=${{ inputs.STANDARD_ACCOUNT }}" >> $GITHUB_ENV
echo "AZURE_STORAGE_ACCESS_KEY=${{ inputs.STANDARD_KEY }}" >> $GITHUB_ENV
elif [ "${{ matrix.TestType }}" == "premium" ]; then
echo "Create premium account env"
echo "AZURE_STORAGE_ACCOUNT=${{ inputs.PREMIUM_ACCOUNT }}" >> $GITHUB_ENV
echo "AZURE_STORAGE_ACCESS_KEY=${{ inputs.PREMIUM_KEY }}" >> $GITHUB_ENV
elif [ "${{ matrix.TestType }}" == "standard_hns" ]; then
echo "Create standard hns account env"
echo "AZURE_STORAGE_ACCOUNT=${{ inputs.STANDARD_HNS_ACCOUNT }}" >> $GITHUB_ENV
echo "AZURE_STORAGE_ACCESS_KEY=${{ inputs.STANDARD_HNS_KEY }}" >> $GITHUB_ENV
elif [ "${{ matrix.TestType }}" == "premium_hns" ]; then
echo "Create premium hns account env"
echo "AZURE_STORAGE_ACCOUNT=${{ inputs.PREMIUM_HNS_ACCOUNT }}" >> $GITHUB_ENV
echo "AZURE_STORAGE_ACCESS_KEY=${{ inputs.PREMIUM_HNS_KEY }}" >> $GITHUB_ENV
fi
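# NOTE: matrix.TestType (standard, premium, standard_hns, premium_hns) is not an input to this
# composite action; it is expected to be defined by the calling workflow's strategy matrix.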

# Create the config file for testing
- name: "Create config file for account type: ${{ matrix.TestType }}"
shell: bash
run: |
blobfuse2 gen-test-config --config-file=azure_block_bench.yaml --container-name=${{ inputs.BENCH_CONTAINER }} --output-file=./config.yaml
cat ./config.yaml

# Create the mount paths
- name: "Create mount path"
shell: bash
run: |
sudo mkdir -p /mnt/blob_mnt
sudo mkdir -p /mnt/tempcache
sudo chmod 777 /mnt/blob_mnt
sudo chmod 777 /mnt/tempcache

# ---------------------------------------------------------------------------------------------------------------------------------------------------
# Run the basic tests using FIO

# Run the Read tests
- name: "Read Test"
uses: "./.github/template/generate_page"
with:
MACHINE: ${{ inputs.MACHINE }}
TEST: "read"
TYPE: ${{ matrix.TestType }}
TOKEN: ${{ inputs.GITHUB_TOKEN }}

# Run the Write tests with a high number of threads
- name: "High threads Test"
uses: "./.github/template/generate_page"
with:
MACHINE: ${{ inputs.MACHINE }}
TEST: "highlyparallel"
TYPE: ${{ matrix.TestType }}
TOKEN: ${{ inputs.GITHUB_TOKEN }}

# Run the Write tests
- name: "Write Test"
uses: "./.github/template/generate_page"
with:
MACHINE: ${{ inputs.MACHINE }}
TEST: "write"
TYPE: ${{ matrix.TestType }}
TOKEN: ${{ inputs.GITHUB_TOKEN }}

# Run the Create tests
- name: "Create File Test"
uses: "./.github/template/generate_page"
with:
MACHINE: ${{ inputs.MACHINE }}
TEST: "create"
TYPE: ${{ matrix.TestType }}
TOKEN: ${{ inputs.GITHUB_TOKEN }}
# ---------------------------------------------------------------------------------------


# The tests below need to run separately as their output format is different
# ---------------------------------------------------------------------------------------------------
# Run the List tests
# This should always run after the create tests
- name: "List File Test"
shell: bash
run: |
rm -rf /mnt/blob_mnt/*
rm -rf /mnt/tempcache/*
./perf_testing/scripts/fio_bench.sh /mnt/blob_mnt list

- name: "Update Benchmark Results : List"
uses: benchmark-action/github-action-benchmark@v1
with:
output-file-path: list/list_results.json
tool: 'customSmallerIsBetter'
alert-threshold: "500%"
max-items-in-chart: 100
github-token: ${{ inputs.GITHUB_TOKEN }}
fail-on-alert: true
auto-push: true
comment-on-alert: true
gh-pages-branch: benchmarks
benchmark-data-dir-path: ${{ inputs.MACHINE }}/${{ matrix.TestType }}/time/list

# ---------------------------------------------------------------------------------------
# Run app-based tests
# These need to run separately as their output format is different
- name: "App based Test"
shell: bash
run: |
rm -rf /mnt/blob_mnt/*
rm -rf /mnt/tempcache/*
./perf_testing/scripts/fio_bench.sh /mnt/blob_mnt app


- name: "Update Bandwidth Results : App"
uses: benchmark-action/github-action-benchmark@v1
with:
output-file-path: app/app_bandwidth.json
tool: 'customBiggerIsBetter'
alert-threshold: "160%"
max-items-in-chart: 100
github-token: ${{ inputs.GITHUB_TOKEN }}
fail-on-alert: true
auto-push: true
comment-on-alert: true
gh-pages-branch: benchmarks
benchmark-data-dir-path: ${{ inputs.MACHINE }}/${{ matrix.TestType }}/bandwidth/app

- name: "Update Latency Results : App"
uses: benchmark-action/github-action-benchmark@v1
with:
output-file-path: app/app_time.json
tool: 'customSmallerIsBetter'
alert-threshold: "160%"
max-items-in-chart: 100
github-token: ${{ inputs.GITHUB_TOKEN }}
fail-on-alert: true
auto-push: true
comment-on-alert: true
gh-pages-branch: benchmarks
benchmark-data-dir-path: ${{ inputs.MACHINE }}/${{ matrix.TestType }}/time/app

- name: "Update Bandwidth Results : High Speed App"
uses: benchmark-action/github-action-benchmark@v1
with:
output-file-path: app/highapp_bandwidth.json
tool: 'customBiggerIsBetter'
alert-threshold: "160%"
max-items-in-chart: 100
github-token: ${{ inputs.GITHUB_TOKEN }}
fail-on-alert: true
auto-push: true
comment-on-alert: true
gh-pages-branch: benchmarks
benchmark-data-dir-path: ${{ inputs.MACHINE }}/${{ matrix.TestType }}/bandwidth/highapp

- name: "Update Latency Results : High Speed App"
uses: benchmark-action/github-action-benchmark@v1
with:
output-file-path: app/highapp_time.json
tool: 'customSmallerIsBetter'
alert-threshold: "160%"
max-items-in-chart: 100
github-token: ${{ inputs.GITHUB_TOKEN }}
fail-on-alert: true
auto-push: true
comment-on-alert: true
gh-pages-branch: benchmarks
benchmark-data-dir-path: ${{ inputs.MACHINE }}/${{ matrix.TestType }}/time/highapp

# ---------------------------------------------------------------------------------------
# Run Rename tests
# This needs to run separately as its output format is different
- name: "Rename Test"
shell: bash
run: |
rm -rf /mnt/blob_mnt/*
rm -rf /mnt/tempcache/*
./perf_testing/scripts/fio_bench.sh /mnt/blob_mnt rename

- name: "Update Latency Results : Rename"
uses: benchmark-action/github-action-benchmark@v1
with:
output-file-path: rename/rename_time.json
tool: 'customSmallerIsBetter'
alert-threshold: "160%"
max-items-in-chart: 100
github-token: ${{ inputs.GITHUB_TOKEN }}
fail-on-alert: true
auto-push: true
comment-on-alert: true
gh-pages-branch: benchmarks
benchmark-data-dir-path: ${{ inputs.MACHINE }}/${{ matrix.TestType }}/time/rename
# ---------------------------------------------------------------------------------------
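
For reference, a minimal sketch of a workflow that could consume this composite action; the trigger, runner label, machine label, and secret names below are assumptions for illustration, not part of this change. A checkout step is needed first because the action runs scripts from the repository (./build.sh, ./perf_testing/scripts/fio_bench.sh):

name: ARM Benchmark
on:
  workflow_dispatch:
jobs:
  perf-test:
    runs-on: ubuntu-22.04-arm            # assumed ARM runner label
    strategy:
      matrix:
        TestType: ["standard", "premium", "standard_hns", "premium_hns"]
    steps:
      - uses: actions/checkout@v4
      - uses: ./.github/template/perftesting
        with:
          MACHINE: "arm64"               # assumed machine label used to namespace results
          STANDARD_ACCOUNT: ${{ secrets.STANDARD_ACCOUNT }}
          STANDARD_KEY: ${{ secrets.STANDARD_KEY }}
          PREMIUM_ACCOUNT: ${{ secrets.PREMIUM_ACCOUNT }}
          PREMIUM_KEY: ${{ secrets.PREMIUM_KEY }}
          STANDARD_HNS_ACCOUNT: ${{ secrets.STANDARD_HNS_ACCOUNT }}
          STANDARD_HNS_KEY: ${{ secrets.STANDARD_HNS_KEY }}
          PREMIUM_HNS_ACCOUNT: ${{ secrets.PREMIUM_HNS_ACCOUNT }}
          PREMIUM_HNS_KEY: ${{ secrets.PREMIUM_HNS_KEY }}
          BENCH_CONTAINER: "benchmark-container"      # assumed container name
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}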
