ci: Add github workflow for nitro node #59

name: Build nitro-node
on:
  schedule:
    - cron: "0 20 * * *" # At 20:00 UTC, which is 3:00 AM UTC+7 the following day
  push:
    branches:
      - main
    tags: ["v[0-9]+.[0-9]+.[0-9]+"]
    paths: [".github/workflows/build-nitro-node.yml", "nitro-node/**"]
  pull_request:
    types: [opened, synchronize, reopened]
    paths: [".github/workflows/build-nitro-node.yml", "nitro-node/**"]
  workflow_dispatch:
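# Path filters use glob patterns: "nitro-node/**" matches every file under the
# nitro-node/ directory, while a bare "nitro-node" would only match a file with
# that exact path, so pushes and pull requests touching the package trigger the
# jobs below.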
jobs:
  ubuntu-amd64-non-cuda-build:
    runs-on: ubuntu-latest
    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive
      - uses: actions/setup-node@v4
        with:
          node-version: 18
      - name: Restore cached model file
        id: cache-model-restore
        uses: actions/cache/restore@v4
        with:
          path: |
            nitro-node/test/test_assets/*.gguf
          key: ${{ runner.os }}-model-gguf
      - uses: suisei-cn/[email protected]
        id: download-model-file
        name: Download model file
        with:
          url: "The model we are using is [tinyllama-1.1b](https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf)!"
          target: nitro-node/test/test_assets/
          auto-match: true
          retry-times: 3
      - name: Save downloaded model file to cache
        id: cache-model-save
        uses: actions/cache/save@v4
        with:
          path: |
            nitro-node/test/test_assets/*.gguf
          key: ${{ steps.cache-model-restore.outputs.cache-primary-key }}
      - name: Run tests
        id: test_nitro_node
        run: |
          cd nitro-node
          make clean test-ci
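      # Alternative (sketch, not enabled): the explicit restore/save pair used in
      # each job could be collapsed into the combined cache action, which restores
      # at this point and saves automatically in a post-job step when the key was
      # not found.
      # - name: Cache model file
      #   uses: actions/cache@v4
      #   with:
      #     path: nitro-node/test/test_assets/*.gguf
      #     key: ${{ runner.os }}-model-gguf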
  #ubuntu-amd64-build:
  #  runs-on: ubuntu-18-04-cuda-11-7
  #  steps:
  #    - name: Clone
  #      id: checkout
  #      uses: actions/checkout@v4
  #      with:
  #        submodules: recursive
  #    - uses: actions/setup-node@v4
  #      with:
  #        node-version: 18
  #    - name: Restore cached model file
  #      id: cache-model-restore
  #      uses: actions/cache/restore@v4
  #      with:
  #        path: |
  #          nitro-node/test/test_assets/*.gguf
  #        key: ${{ runner.os }}-model-gguf
  #    - uses: suisei-cn/[email protected]
  #      id: download-model-file
  #      name: Download model file
  #      with:
  #        url: "The model we are using is [tinyllama-1.1b](https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf)!"
  #        target: nitro-node/test/test_assets/
  #        auto-match: true
  #        retry-times: 3
  #    - name: Save downloaded model file to cache
  #      id: cache-model-save
  #      uses: actions/cache/save@v4
  #      with:
  #        path: |
  #          nitro-node/test/test_assets/*.gguf
  #        key: ${{ steps.cache-model-restore.outputs.cache-primary-key }}
  #    - name: Run tests
  #      id: test_nitro_node
  #      run: |
  #        cd nitro-node
  #        make clean test-ci
  #ubuntu-amd64-cuda-build:
  #  runs-on: ubuntu-18-04-cuda-${{ matrix.cuda }}
  #  strategy:
  #    matrix:
  #      cuda: ["12-0", "11-7"]
  #  steps:
  #    - name: Clone
  #      id: checkout
  #      uses: actions/checkout@v4
  #      with:
  #        submodules: recursive
  #    - uses: actions/setup-node@v4
  #      with:
  #        node-version: 18
  #    - name: Restore cached model file
  #      id: cache-model-restore
  #      uses: actions/cache/restore@v4
  #      with:
  #        path: |
  #          nitro-node/test/test_assets/*.gguf
  #        key: ${{ runner.os }}-model-gguf
  #    - uses: suisei-cn/[email protected]
  #      id: download-model-file
  #      name: Download model file
  #      with:
  #        url: "The model we are using is [tinyllama-1.1b](https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf)!"
  #        target: nitro-node/test/test_assets/
  #        auto-match: true
  #        retry-times: 3
  #    - name: Save downloaded model file to cache
  #      id: cache-model-save
  #      uses: actions/cache/save@v4
  #      with:
  #        path: |
  #          nitro-node/test/test_assets/*.gguf
  #        key: ${{ steps.cache-model-restore.outputs.cache-primary-key }}
  #    - name: Run tests
  #      id: test_nitro_node
  #      run: |
  #        cd nitro-node
  #        make clean test-ci
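  # The commented-out jobs above target runner labels such as ubuntu-18-04-cuda-11-7,
  # which are not GitHub-hosted images and presumably refer to self-hosted CUDA
  # runners; they remain disabled here.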
  macOS-M-build:
    runs-on: macos-14
    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive
      - uses: actions/setup-node@v4
        with:
          node-version: 18
      - name: Restore cached model file
        id: cache-model-restore
        uses: actions/cache/restore@v4
        with:
          path: |
            nitro-node/test/test_assets/*.gguf
          key: ${{ runner.os }}-model-gguf
      - uses: suisei-cn/[email protected]
        id: download-model-file
        name: Download model file
        with:
          url: "The model we are using is [tinyllama-1.1b](https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf)!"
          target: nitro-node/test/test_assets/
          auto-match: true
          retry-times: 3
      - name: Save downloaded model file to cache
        id: cache-model-save
        uses: actions/cache/save@v4
        with:
          path: |
            nitro-node/test/test_assets/*.gguf
          key: ${{ steps.cache-model-restore.outputs.cache-primary-key }}
      - name: Run tests
        id: test_nitro_node
        run: |
          cd nitro-node
          make clean test-ci
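  # macos-14 runners are Apple Silicon (arm64); the Intel job below depends on
  # the macos-latest alias resolving to an Intel image, which may shift as GitHub
  # updates that alias.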
  macOS-Intel-build:
    runs-on: macos-latest
    steps:
      - name: Clone
        id: checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive
      - uses: actions/setup-node@v4
        with:
          node-version: 18
      - name: Restore cached model file
        id: cache-model-restore
        uses: actions/cache/restore@v4
        with:
          path: |
            nitro-node/test/test_assets/*.gguf
          key: ${{ runner.os }}-model-gguf
      - uses: suisei-cn/[email protected]
        id: download-model-file
        name: Download model file
        with:
          url: "The model we are using is [tinyllama-1.1b](https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf)!"
          target: nitro-node/test/test_assets/
          auto-match: true
          retry-times: 3
      - name: Save downloaded model file to cache
        id: cache-model-save
        uses: actions/cache/save@v4
        with:
          path: |
            nitro-node/test/test_assets/*.gguf
          key: ${{ steps.cache-model-restore.outputs.cache-primary-key }}
      - name: Run tests
        id: test_nitro_node
        run: |
          cd nitro-node
          make clean test-ci
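  # Manual runs (sketch): with workflow_dispatch enabled, the same jobs can be
  # started from the Actions tab or via the GitHub CLI, e.g.
  #   gh workflow run build-nitro-node.yml --ref main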
  #windows-amd64-build:
  #  runs-on: windows-latest
  #  steps:
  #    - name: Clone
  #      id: checkout
  #      uses: actions/checkout@v4
  #      with:
  #        submodules: recursive
  #    - uses: actions/setup-node@v4
  #      with:
  #        node-version: 18
  #    - name: Setup VSWhere.exe
  #      uses: warrenbuckley/Setup-VSWhere@v1
  #      with:
  #        version: latest
  #        silent: true
  #      env:
  #        ACTIONS_ALLOW_UNSECURE_COMMANDS: true
  #    - name: Restore cached model file
  #      id: cache-model-restore
  #      uses: actions/cache/restore@v4
  #      with:
  #        path: |
  #          nitro-node/test/test_assets/*.gguf
  #        key: ${{ runner.os }}-model-gguf
  #    - uses: suisei-cn/[email protected]
  #      id: download-model-file
  #      name: Download model file
  #      with:
  #        url: "The model we are using is [tinyllama-1.1b](https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf)!"
  #        target: nitro-node/test/test_assets/
  #        auto-match: true
  #        retry-times: 3
  #    - name: Save downloaded model file to cache
  #      id: cache-model-save
  #      uses: actions/cache/save@v4
  #      with:
  #        path: |
  #          nitro-node/test/test_assets/*.gguf
  #        key: ${{ steps.cache-model-restore.outputs.cache-primary-key }}
  #    - name: Run tests
  #      id: test_nitro_node
  #      run: |
  #        cd nitro-node
  #        make clean test-ci
  #windows-amd64-cuda-build:
  #  runs-on: windows-cuda-${{ matrix.cuda }}
  #  strategy:
  #    matrix:
  #      cuda: ["12-0", "11-7"]
  #  steps:
  #    - name: Clone
  #      id: checkout
  #      uses: actions/checkout@v4
  #      with:
  #        submodules: recursive
  #    - uses: actions/setup-node@v4
  #      with:
  #        node-version: 18
  #    - name: actions-setup-cmake
  #      uses: jwlawson/[email protected]
  #    - name: Setup VSWhere.exe
  #      uses: warrenbuckley/Setup-VSWhere@v1
  #      with:
  #        version: latest
  #        silent: true
  #      env:
  #        ACTIONS_ALLOW_UNSECURE_COMMANDS: true
  #    - uses: actions/setup-dotnet@v3
  #      with:
  #        dotnet-version: "6.0.x"
  #    - name: Restore cached model file
  #      id: cache-model-restore
  #      uses: actions/cache/restore@v4
  #      with:
  #        path: |
  #          nitro-node/test/test_assets/*.gguf
  #        key: ${{ runner.os }}-model-gguf
  #    - uses: suisei-cn/[email protected]
  #      id: download-model-file
  #      name: Download model file
  #      with:
  #        url: "The model we are using is [tinyllama-1.1b](https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf)!"
  #        target: nitro-node/test/test_assets/
  #        auto-match: true
  #        retry-times: 3
  #    - name: Save downloaded model file to cache
  #      id: cache-model-save
  #      uses: actions/cache/save@v4
  #      with:
  #        path: |
  #          nitro-node/test/test_assets/*.gguf
  #        key: ${{ steps.cache-model-restore.outputs.cache-primary-key }}
  #    - name: Run tests
  #      id: test_nitro_node
  #      run: |
  #        cd nitro-node
  #        make clean test-ci
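# Local reproduction (sketch, assuming Node 18 is installed and a GGUF model file
# is already placed under nitro-node/test/test_assets/): the CI test step boils
# down to
#   cd nitro-node
#   make clean test-ci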