Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Torch WIP #358

Open
wants to merge 4 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ set(CMAKE_CXX_FLAGS_RELEASE "-O2")

# Options
option(NERLWOLF "Use Wolfram Engine workers extension" OFF)
# Description capitalization made consistent with the other options.
option(NERLTORCH "Use libtorch installed to /usr/local/lib/libtorch" OFF)
option(USE_OpenMP "Use-OpenMP" ON)

#add_compile_definitions(EIGEN_MAX_ALIGN_BYTES=8) #Open this line for RASPI
Expand All @@ -42,3 +43,8 @@ if (NERLWOLF)
add_library(nerlnet_wolf SHARED $<TARGET_OBJECTS:wolframBridge>)
target_link_libraries(nerlnet_wolf PUBLIC wolframBridge)
endif()

# Mirrors the NERLWOLF block in this file: wraps the torchBridge object
# library in a standalone shared library that the Erlang side can load.
if (NERLTORCH)
add_library(nerlnet_torch SHARED $<TARGET_OBJECTS:torchBridge>)
target_link_libraries(nerlnet_torch PUBLIC torchBridge)
endif()
29 changes: 27 additions & 2 deletions NerlnetBuild.sh
Original file line number Diff line number Diff line change
Expand Up @@ -11,18 +11,25 @@ INPUT_DATA_DIR="inputDataDir"
Branch="master"
JobsNum=4
NerlWolf=OFF
NerlTorch=OFF

# help
# Print the build script usage banner to stdout, then terminate the script
# with exit status 2 (conventional "bad usage" code).
help()
{
	local bar="-------------------------------------"
	echo "$bar"
	echo "Nerlnet Build"
	echo "$bar"
	echo "Usage:"
	echo "--p or --pull Warning! this uses checkout -f! and branch name checkout to branch $Branch and pull the latest"
	echo "--w or --wolf wolfram engine workers infra (nerlwolf)"
	echo "--t or --torch torch workers infra (nerltorch)"
	echo "--j or --jobs number of jobs to cmake build"
	echo "--c or --clean remove build directory"
	exit 2
}

# print MESSAGE
# Emit MESSAGE on stdout prefixed with the global $NERLNET_BUILD_PREFIX banner.
print()
{
	printf '%s\n' "$NERLNET_BUILD_PREFIX $1"
}

gitOperations()
{
echo "$NERLNET_PREFIX Warning! git checkout -f is about to be executed"
Expand Down Expand Up @@ -67,6 +74,8 @@ print_help()
printf '\t%s\n' "-j, --jobs: number of jobs (default: '4')"
# Fixed: the --pull default was copy/pasted from --jobs as '4'; the script
# initializes Branch to 'master'.
printf '\t%s\n' "-p, --pull: pull from branch (default: 'master')"
printf '\t%s\n' "-w, --wolf: wolfram engine extension build (default: 'off')"
printf '\t%s\n' "-t, --torch: torch engine extension build (default: 'off')"
printf '\t%s\n' "-c, --clean: clean build directory (default: 'off')"

}

Expand Down Expand Up @@ -110,6 +119,17 @@ parse_commandline()
-w*)
	# Strip the option's own short prefix. Was "${_key##-j}" (copy/paste
	# from the --jobs arm), which never matched, so "-wON" stayed "-wON".
	NerlWolf="${_key##-w}"
	;;
-t|--torch)
	test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
	NerlTorch="$2"
	shift
	;;
--torch=*)
	# Was "${_key##--jobs=}" (copy/paste from --jobs): the prefix never
	# matched, leaving NerlTorch as the literal "--torch=VALUE".
	NerlTorch="${_key##--torch=}"
	;;
-t*)
	# Same copy/paste fix as above: strip "-t", not "-j".
	NerlTorch="${_key##-t}"
	;;
-j|--jobs)
test $# -lt 2 && die "Missing value for the optional argument '$_key'." 1
JobsNum="$2"
Expand Down Expand Up @@ -157,6 +177,11 @@ else
sed -i "s/^.*\(${OPTION}.*$\)/#\1/" CMakeLists.txt
fi

# Report torch-extension status. NOTE(review): `=~ OFF` is a substring match,
# so any value NOT containing "OFF" (e.g. "ON", "1") enables the message,
# while e.g. "OFFLINE" would count as disabled — presumably only ON/OFF are
# ever passed; verify against parse_commandline.
if [[ ! $NerlTorch =~ OFF ]]; then
print "NerlTorch is enabled"
print "Installation directory points to /usr/local/lib/libtorch"
fi

if command -v python3 >/dev/null 2>&1; then
echo "$NERLNET_BUILD_PREFIX Python 3 is installed"
# Generate auto-generated files
Expand Down Expand Up @@ -187,7 +212,7 @@ fi
echo "$NERLNET_BUILD_PREFIX Building Nerlnet Library"
echo "$NERLNET_BUILD_PREFIX Cmake command of Nerlnet NIFPP"
set -e
cmake -S . -B build/release -DNERLWOLF=$NerlWolf -DCMAKE_BUILD_TYPE=RELEASE
cmake -S . -B build/release -DNERLWOLF=$NerlWolf -DNERLTORCH=$NerlTorch -DCMAKE_BUILD_TYPE=RELEASE
cd build/release
echo "$NERLNET_BUILD_PREFIX Script CWD: $PWD"
echo "$NERLNET_BUILD_PREFIX Build Nerlnet"
Expand Down
5 changes: 5 additions & 0 deletions src_cpp/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,9 @@ add_subdirectory(source)
if(NERLWOLF)
message("[NERLNET] Wolfram Engine nif extension is enabled")
add_subdirectory(wolframBridge)
endif()

# Optional libtorch NIF bridge (see src_cpp/torchBridge/Readme.md),
# enabled from NerlnetBuild.sh via -DNERLTORCH=ON.
if(NERLTORCH)
message("[NERLNET] Libtorch nif extension is enabled")
add_subdirectory(torchBridge)
endif()
46 changes: 46 additions & 0 deletions src_cpp/torchBridge/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
#****************************************************
# Authors: David Leon
# 29/10/2021
#
# @copyright Copyright (c) 2021 Nerlnet
# *****************************************************/

project(torchBridge)

# In-tree header locations shared with the other NIF bridges.
set(NIFPP_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../nifpp/")
set(SIMPLE_LOGGER_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../simple-cpp-logger/include")
set(COMMON_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../common")

# Hardcoded libtorch install prefix (documented in this directory's Readme.md).
set(Torch_DIR "/usr/local/lib/libtorch/share/cmake/Torch")

# Locate libtorch through its package configuration. The previous form,
# find_package(Torch REQUIRED "TorchConfig.cmake" CONFIG), passed the config
# file name positionally, where find_package treats it as a COMPONENT name.
find_package(Torch CONFIG REQUIRED)

set(CMAKE_VERBOSE_MAKEFILE ON)

# Erlang NIF headers (erl_nif.h) from the default Erlang/OTP install.
set(ERL_NIF_DEFAULT_LOCATION "/usr/local/lib/erlang/usr/include")

set(SRC_CODE
    "nerltensorTorchDefs.h"
    "nifppNerlTensorTorch.h"
    "torchNIF.h"
    "torchNIF.cpp"
    "NerlWorkerTorch.h"
    "NerlWorkerTorch.cpp"
    "NerlWorkerTorchNIF.h"
)

add_library(${PROJECT_NAME} SHARED ${SRC_CODE})

# -fPIC expressed portably, per target. The previous
# set(CMAKE_CXX_FLAGS "-fpic") overwrote any inherited compiler flags
# for every target below this directory.
set_target_properties(${PROJECT_NAME} PROPERTIES POSITION_INDEPENDENT_CODE ON)

# cpp Simple logger options — scoped to this target instead of the
# directory-wide add_definitions().
target_compile_definitions(${PROJECT_NAME} PRIVATE
    LOGGER_MAX_LOG_LEVEL_PRINTED=6
    LOGGER_PREFIX_LEVEL=2
    LOGGER_ENABLE_COLORS=1
    LOGGER_ENABLE_COLORS_ON_USER_HEADER=0
)

target_link_libraries(${PROJECT_NAME} PUBLIC "${TORCH_LIBRARIES}" common)

# Include NIF, common and Simple Cpp Logger headers.
target_include_directories(${PROJECT_NAME} PUBLIC
    ${COMMON_PATH}
    ${NIFPP_PATH}
    ${SIMPLE_LOGGER_PATH}
    ${ERL_NIF_DEFAULT_LOCATION}
)
1 change: 1 addition & 0 deletions src_cpp/torchBridge/NerlWorkerTorch.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
#include "NerlWorkerTorch.h"
19 changes: 19 additions & 0 deletions src_cpp/torchBridge/NerlWorkerTorch.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
#pragma once

#include <cassert>
#include <Logger.h>

#include "../common/nerlWorker.h"
#include "worker_definitions_ag.h"
#include "nifppNerlTensorTorch.h"


namespace nerlnet
{

// Torch-backed worker (WIP stub): derives from the shared NerlWorker base
// but adds no members or overrides yet.
// NOTE(review): NerlWorker is declared in ../common/nerlWorker.h, outside
// this view — if it lacks a default constructor this stub cannot be
// instantiated; verify when the implementation is filled in.
class NerlWorkerTorch : public NerlWorker
{

};

} // namespace nerlnet
2 changes: 2 additions & 0 deletions src_cpp/torchBridge/NerlWorkerTorchNIF.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
#pragma once

13 changes: 13 additions & 0 deletions src_cpp/torchBridge/Readme.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
## Torch Bridge

(Not yet supported)
This is a bridge that extends Nerlnet to support libtorch as cpp neural network library.

### Installation

1. Go to [Pytorch site](https://pytorch.org/get-started/locally/) and download libtorch
2. Extract libtorch to ```/usr/local/lib/libtorch```
3. Execute Nerlnet build with ```--torch=ON``` (or ```-t ON```)
4. Select worker infrastructure in Nerlplanner as torch.

For apple silicon use [this repo](https://github.com/Nerlnet/libtorch_compiled) to download compiled libtorch.
14 changes: 14 additions & 0 deletions src_cpp/torchBridge/nerltensorTorchDefs.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
#pragma once

#include <nerltensor.h>
#include <torch/torch.h>

namespace nerlnet
{

// Alias so bridge code is decoupled from the concrete torch tensor type.
using TorchTensor = torch::Tensor;

// Dimensionality cases of a nerltensor (0 = 1D, 1 = 2D, 2 = 3D); the value
// doubles as the index of the highest axis whose extent is > 1.
enum {DIMS_CASE_1D,DIMS_CASE_2D,DIMS_CASE_3D};
// Positions of the x/y/z extents in a nerltensor binary header;
// DIMS_TOTAL (= 3) is the header element count.
enum {DIMS_X_IDX,DIMS_Y_IDX,DIMS_Z_IDX,DIMS_TOTAL};

} // namespace nerlnet
120 changes: 120 additions & 0 deletions src_cpp/torchBridge/nifppNerlTensorTorch.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
#pragma once

#include "nifpp.h"
#include "nerltensorTorchDefs.h"


namespace nifpp
{
using namespace nerlnet;

// Dimension metadata decoded from the header of a nerltensor binary
// (the first DIMS_TOTAL elements of the payload, stored as BasicType).
struct nerltensor_dims
{
int dimx; // extent along X (header element DIMS_X_IDX)
int dimy; // extent along Y (header element DIMS_Y_IDX)
int dimz; // extent along Z (header element DIMS_Z_IDX)
int total_size; // dimx * dimy * dimz — number of elements in the tensor
int dims_case; // one of DIMS_CASE_1D/2D/3D (see nerltensorTorchDefs.h)
};

// Declarations
// Decode header dims of a nerltensor binary term into dims_info.
template<typename BasicType> int get_nerltensor_dims(ErlNifEnv *env , ERL_NIF_TERM bin_term, nerltensor_dims &dims_info);
// Copy a nerltensor binary term's data into a freshly allocated torch tensor.
template<typename BasicType> int get_nerltensor(ErlNifEnv *env , ERL_NIF_TERM bin_term, TorchTensor &tensor, torch::ScalarType torch_dtype);
// Encode a torch tensor (header dims + raw data) into an Erlang binary term.
template<typename BasicType> void make_tensor(ErlNifEnv *env , nifpp::TERM &ret_bin_term, TorchTensor &tensor);


// Definitions
// Decode the dimension header of a nerltensor binary term into dims_info.
// The first DIMS_TOTAL elements of the binary are the x/y/z extents, stored
// as BasicType. Returns the (non-zero) result of enif_inspect_binary.
template<typename BasicType> int get_nerltensor_dims(ErlNifEnv *env , ERL_NIF_TERM bin_term, nerltensor_dims &dims_info)
{
    ErlNifBinary bin;
    int ret = enif_inspect_binary(env, bin_term, &bin);
    assert(ret != 0);

    // extract dims and data size
    std::vector<BasicType> dims;
    dims.resize(DIMS_TOTAL);
    memcpy(dims.data(), bin.data, DIMS_TOTAL * sizeof(BasicType));

    // Default to the 1D case: a 1x1x1 tensor never enters the `> 1` branch
    // below, and the original code left dims_case uninitialized for it.
    dims_info.dims_case = DIMS_CASE_1D;
    dims_info.total_size = 1;
    for (int i = 0; i < DIMS_TOTAL; i++)
    {
        dims_info.total_size *= dims[i];
        if (dims[i] > 1)
        {
            // Highest axis with extent > 1 decides the dimensionality case.
            dims_info.dims_case = i;
        }
    }
    assert(("Negative Or zero value of dimension", dims_info.total_size > 0));

    dims_info.dimx = static_cast<int>(dims[DIMS_X_IDX]);
    dims_info.dimy = static_cast<int>(dims[DIMS_Y_IDX]);
    dims_info.dimz = static_cast<int>(dims[DIMS_Z_IDX]);

    // Original fell off the end of a non-void function (undefined behavior).
    return ret;
}


// Build a torch tensor from a nerltensor binary term: allocate a zeroed
// tensor shaped per the header dims, then copy the raw payload into it.
// Returns the (non-zero) result of enif_inspect_binary.
template<typename BasicType> int get_nerltensor(ErlNifEnv *env , ERL_NIF_TERM bin_term, TorchTensor &tensor, torch::ScalarType torch_dtype)
{
    ErlNifBinary bin;
    int ret = enif_inspect_binary(env, bin_term, &bin);
    assert(ret != 0);

    // extract dims and data size
    nerltensor_dims dims_info;
    get_nerltensor_dims<BasicType>(env, bin_term, dims_info);

    switch (dims_info.dims_case)
    {
        case DIMS_CASE_1D:
        {
            tensor = torch::zeros(dims_info.dimx, torch_dtype);
            break;
        }
        case DIMS_CASE_2D:
        {
            tensor = torch::zeros({dims_info.dimx, dims_info.dimy}, torch_dtype);
            break;
        }
        case DIMS_CASE_3D:
        {
            tensor = torch::zeros({dims_info.dimx, dims_info.dimy, dims_info.dimz}, torch_dtype);
            break;
        }
    }

    // Fixed: the original comma-operator form
    //   assert((cond, "msg"))
    // evaluated to the string literal, so the check always passed.
    assert(("Size of BasicType and torch tensor element size mismatch",
            sizeof(BasicType) == tensor.element_size()));

    // copy data from nerltensor to torch tensor, skipping the dims header
    int skip_dims_bytes = (DIMS_TOTAL * sizeof(BasicType));
    std::memcpy(tensor.data_ptr(), bin.data + skip_dims_bytes, sizeof(BasicType) * tensor.numel());

    // Original fell off the end of a non-void function (undefined behavior).
    return ret;
}

// Encode a torch tensor into an Erlang binary term: DIMS_TOTAL header
// elements (missing trailing axes padded with 1) followed by the raw data.
template<typename BasicType> void make_tensor(ErlNifEnv *env , nifpp::TERM &ret_bin_term, TorchTensor &tensor)
{
    std::vector<BasicType> dims;
    dims.resize(DIMS_TOTAL);
    for (int dim = 0; dim < DIMS_TOTAL; dim++)
    {
        // Fixed: IntArrayRef has size(), not Length() — the original call
        // did not compile against the libtorch C++ API.
        if (dim < static_cast<int>(tensor.sizes().size()))
        {
            dims[dim] = static_cast<BasicType>(tensor.sizes()[dim]);
        }
        else
        {
            dims[dim] = 1; // pad absent axes so the header is always 3 values
        }
    }
    size_t dims_size = DIMS_TOTAL * sizeof(BasicType);
    size_t data_size = tensor.numel() * sizeof(BasicType);

    nifpp::binary nifpp_bin(dims_size + data_size);

    // Fixed: the original comma-operator form always evaluated to the
    // string literal, so the size check never fired.
    assert(("Size of BasicType and torch tensor element size mismatch",
            sizeof(BasicType) == tensor.element_size()));

    std::memcpy(nifpp_bin.data, dims.data(), dims_size);
    std::memcpy(nifpp_bin.data + dims_size, tensor.data_ptr(), data_size);

    ret_bin_term = nifpp::make(env, nifpp_bin);
}

}
13 changes: 13 additions & 0 deletions src_cpp/torchBridge/torchNIF.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
#include "torchNIF.h"


// Thread entry point for training (torch bridge WIP — not implemented yet).
// Returns nullptr: the original fell off the end of a non-void function,
// which is undefined behavior in C++.
void* train_threaded_function(void* args)
{
    (void)args; // unused until the training path is implemented
    return nullptr;
}


// Thread entry point for prediction (torch bridge WIP — not implemented yet).
// Returns nullptr: the original fell off the end of a non-void function,
// which is undefined behavior in C++.
void* predict_threaded_function(void* args)
{
    (void)args; // unused until the prediction path is implemented
    return nullptr;
}
Loading