Heya, I’ve been attempting to get NNE running on 5.4 and have been struggling to translate some things from the older 5.2 quick start guide (chose this one as it’s for getting a blueprint going). I did go through and replace all the “NNECore” references with just “NNE” and for the most part it’s fine, I just have a few errors that stop me from building and have had no luck fixing them so far.
I’ve got all of the NNE related plugins enabled just in case and have added NNE to the project’s build file.
My files, they are mostly identical to the guide.
NeuralNetworkObject.h
#pragma once
#include "CoreMinimal.h"
#include "UObject/NoExportTypes.h"
#include "NNE.h"
#include "NNEModelData.h"
#include "NNERuntimeCPU.h"
#include "NNERuntimeGPU.h"
#include "NNERuntime.h"
#include "NNETypes.h"
#include "NNETensor.h"
#include "NeuralNetworkObject.generated.h"
USTRUCT(BlueprintType, Category = "NeuralNetworkObject")
struct FNeuralNetworkTensor
{
GENERATED_BODY()
public:
UPROPERTY(BlueprintReadWrite, Category = "NeuralNetworkObject")
TArray<int32> Shape = TArray<int32>();
UPROPERTY(BlueprintReadWrite, Category = "NeuralNetworkObject")
TArray<float> Data = TArray<float>();
};
UCLASS(BlueprintType, Category = "NeuralNetworkObject")
class PROJECT_API UNeuralNetworkObject : public UObject
{
GENERATED_BODY()
public:
UFUNCTION(BlueprintCallable, Category = "NeuralNetworkObject")
static TArray<FString> GetRuntimeNames();
UFUNCTION(BlueprintCallable, Category = "NeuralNetworkObject")
static UNeuralNetworkObject* CreateModel(UObject* Parent, FString RuntimeName, UNNEModelData* ModelData);
UFUNCTION(BlueprintCallable, Category = "NeuralNetworkObject")
static bool CreateTensor(TArray<int32> Shape, UPARAM(ref) FNeuralNetworkTensor& Tensor);
public:
UFUNCTION(BlueprintCallable, Category = "NeuralNetworkObject")
int32 NumInputs();
UFUNCTION(BlueprintCallable, Category = "NeuralNetworkObject")
int32 NumOutputs();
UFUNCTION(BlueprintCallable, Category = "NeuralNetworkObject")
TArray<int32> GetInputShape(int32 Index);
UFUNCTION(BlueprintCallable, Category = "NeuralNetworkObject")
TArray<int32> GetOutputShape(int32 Index);
public:
UFUNCTION(BlueprintCallable, Category = "NeuralNetworkObject")
bool SetInputs(const TArray<FNeuralNetworkTensor>& Inputs);
UFUNCTION(BlueprintCallable, Category = "NeuralNetworkObject")
bool RunSync(UPARAM(ref) TArray<FNeuralNetworkTensor>& Outputs);
private:
TSharedPtr<UE::NNE::IModelInstanceCPU> Model;
TArray<UE::NNE::FTensorBindingCPU> InputBindings;
TArray<UE::NNE::FTensorShape> InputShapes;
};
NeuralNetworkObject.cpp
#include "NeuralNetworkObject.h"
TArray<FString> UNeuralNetworkObject::GetRuntimeNames()
{
using namespace UE::NNE;
TArray<FString> Result;
TArrayView<TWeakInterfacePtr<INNERuntime>> Runtimes = GetAllRuntimes();
for (int32 i = 0; i < Runtimes.Num(); i++)
{
if (Runtimes[i].IsValid() && Cast<INNERuntimeCPU>(Runtimes[i].Get()))
{
Result.Add(Runtimes[i]->GetRuntimeName());
}
}
return Result;
}
UNeuralNetworkObject* UNeuralNetworkObject::CreateModel(UObject* Parent, FString RuntimeName, UNNEModelData* ModelData)
{
using namespace UE::NNE;
if (!ModelData)
{
UE_LOG(LogTemp, Error, TEXT("Invalid model data"));
return nullptr;
}
TWeakInterfacePtr<INNERuntimeCPU> Runtime = GetRuntime<INNERuntimeCPU>(RuntimeName);
if (!Runtime.IsValid())
{
UE_LOG(LogTemp, Error, TEXT("No CPU runtime '%s' found"), *RuntimeName);
return nullptr;
}
TUniquePtr<IModelCPU> UniqueModel = Runtime->CreateModelCPU(ModelData);
if (!UniqueModel.IsValid())
{
UE_LOG(LogTemp, Error, TEXT("Could not create the CPU model"));
return nullptr;
}
UNeuralNetworkObject* Result = NewObject<UNeuralNetworkObject>(Parent);
if (Result)
{
Result->Model = TSharedPtr<IModelCPU>(UniqueModel.Release());
return Result;
}
return nullptr;
}
bool UNeuralNetworkObject::CreateTensor(TArray<int32> Shape, UPARAM(ref) FNeuralNetworkTensor& Tensor)
{
if (Shape.Num() == 0)
{
return false;
}
int32 Volume = 1;
for (int32 i = 0; i < Shape.Num(); i++)
{
if (Shape[i] < 1)
{
return false;
}
Volume *= Shape[i];
}
Tensor.Shape = Shape;
Tensor.Data.SetNum(Volume);
return true;
}
int32 UNeuralNetworkObject::NumInputs()
{
check(Model.IsValid())
return Model->GetInputTensorDescs().Num();
}
int32 UNeuralNetworkObject::NumOutputs()
{
check(Model.IsValid())
return Model->GetOutputTensorDescs().Num();
}
TArray<int32> UNeuralNetworkObject::GetInputShape(int32 Index)
{
check(Model.IsValid())
using namespace UE::NNE;
TConstArrayView<FTensorDesc> Desc = Model->GetInputTensorDescs();
if (Index < 0 || Index >= Desc.Num())
{
return TArray<int32>();
}
return TArray<int32>(Desc[Index].GetShape().GetData());
}
TArray<int32> UNeuralNetworkObject::GetOutputShape(int32 Index)
{
check(Model.IsValid())
using namespace UE::NNE;
TConstArrayView<FTensorDesc> Desc = Model->GetOutputTensorDescs();
if (Index < 0 || Index >= Desc.Num())
{
return TArray<int32>();
}
return TArray<int32>(Desc[Index].GetShape().GetData());
}
bool UNeuralNetworkObject::SetInputs(const TArray<FNeuralNetworkTensor>& Inputs)
{
check(Model.IsValid())
using namespace UE::NNE;
InputBindings.Reset();
InputShapes.Reset();
TConstArrayView<FTensorDesc> InputDescs = Model->GetInputTensorDescs();
if (InputDescs.Num() != Inputs.Num())
{
UE_LOG(LogTemp, Error, TEXT("Invalid number of input tensors provided"));
return false;
}
InputBindings.SetNum(Inputs.Num());
InputShapes.SetNum(Inputs.Num());
for (int32 i = 0; i < Inputs.Num(); i++)
{
InputBindings[i].Data = (void*)Inputs[i].Data.GetData();
InputBindings[i].SizeInBytes = Inputs[i].Data.Num() * sizeof(float);
InputShapes[i] = FTensorShape::MakeFromSymbolic(FSymbolicTensorShape::Make(Inputs[i].Shape));
}
if (Model->SetInputTensorShapes(InputShapes) != 0)
{
UE_LOG(LogTemp, Error, TEXT("Failed to set the input shapes"));
return false;
}
return true;
}
bool UNeuralNetworkObject::RunSync(UPARAM(ref) TArray<FNeuralNetworkTensor>& Outputs)
{
check(Model.IsValid());
using namespace UE::NNE;
TConstArrayView<FTensorDesc> OutputDescs = Model->GetOutputTensorDescs();
if (OutputDescs.Num() != Outputs.Num())
{
UE_LOG(LogTemp, Error, TEXT("Invalid number of output tensors provided"));
return false;
}
TArray<FTensorBindingCPU> OutputBindings;
OutputBindings.SetNum(Outputs.Num());
for (int32 i = 0; i < Outputs.Num(); i++)
{
OutputBindings[i].Data = (void*)Outputs[i].Data.GetData();
OutputBindings[i].SizeInBytes = Outputs[i].Data.Num() * sizeof(float);
}
return Model->RunSync(InputBindings, OutputBindings) == 0;
}
Cheers