Base64 image to UTexture2D
// Requires: #include "Misc/Base64.h" and #include "ImageUtils.h"
bool ConvertBase64ToTexture2D(FString Base64Data, UTexture2D*& OutTexture)
{
    OutTexture = nullptr;
    if (Base64Data.IsEmpty()) return false;

    // Strip the "data:image/...;base64," prefix before decoding.
    FString Left, Right;
    if (!Base64Data.Split(TEXT(","), &Left, &Right))
    {
        Right = Base64Data; // no data-URL prefix, decode the whole string
    }

    TArray<uint8> data_buffer;
    if (FBase64::Decode(Right, data_buffer))
    {
        UTexture2D* TempTexture = FImageUtils::ImportBufferAsTexture2D(data_buffer);
        if (TempTexture)
        {
            //TempTexture->MipGenSettings = TMGS_NoMipmaps;
            TempTexture->CompressionSettings = TextureCompressionSettings::TC_VectorDisplacementmap;
            TempTexture->SRGB = true;
            TempTexture->Filter = TextureFilter::TF_Nearest;
            TempTexture->UpdateResource();
            OutTexture = TempTexture;
            return true;
        }
    }
    return false;
}
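For reference, a minimal usage sketch. Assumptions not in the original: the function is reachable from a UMG widget class, and MyImage is a UImage* bound in that widget.

// Hypothetical usage: decode a base64 data URL and show it in a UMG Image widget.
// Requires: #include "Components/Image.h"
void UMyImageWidget::OnBase64Received(const FString& DataUrl)
{
    UTexture2D* Decoded = nullptr;
    if (ConvertBase64ToTexture2D(DataUrl, Decoded) && Decoded)
    {
        MyImage->SetBrushFromTexture(Decoded, /*bMatchSize=*/true);
    }
}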
k4a::image to UTexture2D
// Expects a BGRA32 color image (4 bytes per pixel); CreateTransient defaults to PF_B8G8R8A8.
void AzureKinectDevice::k4a2TextureRGB(k4a::image inImg)
{
    uint8_t* data = inImg.get_buffer();
    int T_Width = inImg.get_width_pixels();
    int T_Height = inImg.get_height_pixels();

    // If the image size is stable, do NOT create the UTexture2D inside a while() loop or Tick;
    // create it once and only re-upload the pixel data (see the persistent-texture sketch below).
    UTexture2D* NewTexture = UTexture2D::CreateTransient((int32)T_Width, (int32)T_Height);

    void* Datas = NewTexture->PlatformData->Mips[0].BulkData.Lock(LOCK_READ_WRITE);
    // Called from Tick or a while(1) loop, FMemory::Memcpy kept growing RAM until it was almost
    // full, even though the temporaries should be released.
    // Suggestion: use FMemory::Memmove(Datas, data, inImg.get_size()) instead.
    FMemory::Memcpy(Datas, data, inImg.get_size());
    NewTexture->PlatformData->Mips[0].BulkData.Unlock();
    NewTexture->UpdateResource();
}
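The raw Memcpy above only lines up with the texture layout when the device delivers BGRA32 color frames. A capture sketch under that assumption; the helper function and its Owner parameter are illustrative and not part of the original class.

// Sketch: configure the Azure Kinect for BGRA32 color output and feed one frame to k4a2TextureRGB.
#include <k4a/k4a.hpp>
#include <chrono>

void CaptureOneColorFrame(AzureKinectDevice& Owner)   // hypothetical helper
{
    k4a::device Device = k4a::device::open(K4A_DEVICE_DEFAULT);
    k4a_device_configuration_t Config = K4A_DEVICE_CONFIG_INIT_DISABLE_ALL;
    Config.color_format = K4A_IMAGE_FORMAT_COLOR_BGRA32;     // 4 bytes/pixel, matches PF_B8G8R8A8
    Config.color_resolution = K4A_COLOR_RESOLUTION_1080P;
    Config.camera_fps = K4A_FRAMES_PER_SECOND_30;
    Device.start_cameras(&Config);

    k4a::capture Capture;
    if (Device.get_capture(&Capture, std::chrono::milliseconds(1000)))
    {
        k4a::image Color = Capture.get_color_image();
        if (Color)
        {
            Owner.k4a2TextureRGB(Color);   // the conversion shown above
        }
    }
    Device.stop_cameras();
}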
// For a 16-bit depth image, create the UTexture2D with an explicit pixel format:
UTexture2D* AzureKinectDevice::CreateNewTexture(int w, int h, EPixelFormat InFormat)
{
    UTexture2D* tex = UTexture2D::CreateTransient(w, h, InFormat);
    tex->AddressX = TextureAddress::TA_Mirror;
    tex->AddressY = TextureAddress::TA_Mirror;
    tex->Filter = TextureFilter::TF_Bilinear;
    tex->RefreshSamplerStates();
    tex->UpdateResource();
    return tex;
}
void AzureKinectDevice::MakeTexture(const k4a::image& inImg)
{
    // If you update the texture at runtime, keep inMem as a member/global variable instead of
    // recreating it every call (see the persistent-texture sketch after this section).
    UTexture2D* inMem = CreateNewTexture((int32)1920, (int32)1080, EPixelFormat::PF_G16); // 16-bit depth image
    void* Datas = inMem->PlatformData->Mips[0].BulkData.Lock(LOCK_READ_WRITE);
    FMemory::Memmove(Datas, inImg.get_buffer(), inImg.get_size());
    inMem->PlatformData->Mips[0].BulkData.Unlock();
    inMem->UpdateResource();
}
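Putting the two notes above together (create the texture once, only Memmove per frame), a persistent-texture sketch; DepthTexture is an assumed UPROPERTY member, not part of the original class.

// Sketch: create the transient texture once, then only re-upload pixel data per frame.
void AzureKinectDevice::UpdateDepthTexture(const k4a::image& InImg)
{
    const int32 W = InImg.get_width_pixels();
    const int32 H = InImg.get_height_pixels();

    // Lazily create (or recreate on a resolution change) instead of allocating every Tick.
    if (!DepthTexture || DepthTexture->GetSizeX() != W || DepthTexture->GetSizeY() != H)
    {
        DepthTexture = CreateNewTexture(W, H, EPixelFormat::PF_G16);
    }

    void* Dest = DepthTexture->PlatformData->Mips[0].BulkData.Lock(LOCK_READ_WRITE);
    FMemory::Memmove(Dest, InImg.get_buffer(), InImg.get_size());
    DepthTexture->PlatformData->Mips[0].BulkData.Unlock();
    DepthTexture->UpdateResource();
}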
k4a::image to cv::Mat
// k4a RGB (BGRA32) image
cv::Mat AzureKinectDevice::K4a2Mat(k4a::image rgb)
{
    int height = k4a_image_get_height_pixels(rgb.handle());
    int width = k4a_image_get_width_pixels(rgb.handle());
    // Wraps the k4a buffer without copying: keep the k4a::image alive (or clone()) while the Mat is in use.
    cv::Mat Color_frame = cv::Mat(height, width, CV_8UC4, k4a_image_get_buffer(rgb.handle()));
    return Color_frame;
}

// k4a depth image
cv::Mat AzureKinectDevice::k4a2Mat_Depth(k4a::image indepth)
{
    int32 height = indepth.get_height_pixels();
    int32 width = indepth.get_width_pixels();
    cv::Mat Depth_frame = cv::Mat(height, width, CV_16UC1, reinterpret_cast<uint16_t*>(k4a_image_get_buffer(indepth.handle())));
    // Map 0-5000 mm to 0-255 for visualisation, then expand to RGBA.
    Depth_frame.convertTo(Depth_frame, CV_8UC1, 255.0 / 5000, 0.0);
    cv::Mat outDepth;
    cv::cvtColor(Depth_frame, outDepth, CV_GRAY2RGBA);
    return outDepth;
}
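A short usage sketch inside some member function of AzureKinectDevice; Capture is assumed to come from device.get_capture() as in the earlier capture sketch. K4a2Mat only wraps the k4a buffer, so clone() if the Mat must outlive the k4a::image (the depth converter already allocates via convertTo/cvtColor).

// Sketch: convert one capture's images to OpenCV.
if (k4a::image Color = Capture.get_color_image())
{
    cv::Mat ColorMat = K4a2Mat(Color).clone();   // CV_8UC4 BGRA; clone() detaches from the k4a buffer
}
if (k4a::image Depth = Capture.get_depth_image())
{
    cv::Mat DepthVis = k4a2Mat_Depth(Depth);     // CV_8UC4 visualisation, already a fresh allocation
}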
cv::Mat to UTexture2D
// Two alternatives are shown below; keep the variant that matches your Mat's channel layout.
void AzureKinectDevice::Mat2Texture(cv::Mat Src)
{
    if (Src.empty()) return;

    // Variant 1: RealSense D435 / D435i / T-series, etc. -- use this for a CV_8UC3 cv::Mat.
    {
        UTexture2D* Texture = UTexture2D::CreateTransient(Src.cols, Src.rows);
        Texture->SRGB = 0;
        const uint8 ch = 4;
        const int32 DataSize = Src.cols * Src.rows * ch;
        uint8* ColorData = new uint8[DataSize];
        for (int y = 0, i = 0; y < Src.rows; ++y)
        {
            for (int x = 0; x < Src.cols; ++x, ++i)
            {
                ColorData[i * ch + 0] = Src.data[i * 3 + 2];
                ColorData[i * ch + 1] = Src.data[i * 3 + 1];
                ColorData[i * ch + 2] = Src.data[i * 3 + 0];
                ColorData[i * ch + 3] = 255;   // opaque alpha
            }
        }
        void* Datas = Texture->PlatformData->Mips[0].BulkData.Lock(LOCK_READ_WRITE);
        FMemory::Memcpy(Datas, ColorData, DataSize);
        Texture->PlatformData->Mips[0].BulkData.Unlock();
        Texture->UpdateResource();
        delete[] ColorData;   // release the staging buffer
    }

    // Variant 2: Kinect DK -- use this for a CV_8UC4 (BGRA) cv::Mat.
    {
        UTexture2D* NewTexture = UTexture2D::CreateTransient((int32)Src.cols, (int32)Src.rows);
        NewTexture->SRGB = 0;
        const int32 DataSize = Src.cols * Src.rows;
        TArray<FColor> ColorData;
        ColorData.Init(FColor(0, 0, 0, 255), DataSize);
        for (int y = 0; y < Src.rows; ++y)
        {
            for (int x = 0; x < Src.cols; ++x)
            {
                const int i = x + (y * Src.cols);
                ColorData[i] = FColor(Src.data[i * 4 + 2], Src.data[i * 4 + 1], Src.data[i * 4 + 0], 255);
            }
        }
        void* Datas = NewTexture->PlatformData->Mips[0].BulkData.Lock(LOCK_READ_WRITE);
        FMemory::Memcpy(Datas, ColorData.GetData(), ColorData.Num() * sizeof(FColor));
        NewTexture->PlatformData->Mips[0].BulkData.Unlock();
        NewTexture->UpdateResource();
    }
}
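To tie the conversions together: Mat2Texture creates its textures locally, so in practice you would store or return them for a material or widget to use. The sketch below shows one processing pass; ProcessColorFrame is a hypothetical member and the GaussianBlur is only a placeholder for whatever OpenCV work you actually need.

// Sketch: Kinect DK color image -> cv::Mat -> OpenCV processing -> UTexture2D.
// Requires: #include <opencv2/imgproc.hpp>
void AzureKinectDevice::ProcessColorFrame(k4a::image InColor)
{
    cv::Mat Bgra = K4a2Mat(InColor);                         // CV_8UC4, wraps the k4a buffer
    cv::Mat Blurred;
    cv::GaussianBlur(Bgra, Blurred, cv::Size(5, 5), 0.0);    // placeholder processing step
    Mat2Texture(Blurred);                                    // Kinect DK path expects CV_8UC4
}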
rs2::frame to cv::Mat
cv::Mat ARSActor::Frames2Mat(const rs2::frame& frame)
{
    // Quick start (no copy): wrap the frame buffer directly, e.g. for separate depth_f / color_f frames:
    //   cv::Mat Depth_Image(cv::Size(iwidth, iheight), CV_16UC1, (void*)depth_f.get_data(), cv::Mat::AUTO_STEP);
    //   cv::Mat Color_Image(cv::Size(iwidth, iheight), CV_8UC3, (void*)color_f.get_data(), cv::Mat::AUTO_STEP);

    auto vf = frame.as<rs2::video_frame>();
    const int w = vf.get_width();
    const int h = vf.get_height();

    if (frame.get_profile().format() == RS2_FORMAT_BGR8)
    {
        return cv::Mat(cv::Size(w, h), CV_8UC3, (void*)frame.get_data(), cv::Mat::AUTO_STEP);
    }
    else if (frame.get_profile().format() == RS2_FORMAT_RGB8)
    {
        auto r = cv::Mat(cv::Size(w, h), CV_8UC3, (void*)frame.get_data(), cv::Mat::AUTO_STEP);
        cv::cvtColor(r, r, CV_RGB2BGR);
        return r;
    }
    else if (frame.get_profile().format() == RS2_FORMAT_Z16)
    {
        return cv::Mat(cv::Size(w, h), CV_16UC1, (void*)frame.get_data(), cv::Mat::AUTO_STEP);
    }
    else if (frame.get_profile().format() == RS2_FORMAT_Y8)
    {
        return cv::Mat(cv::Size(w, h), CV_8UC1, (void*)frame.get_data(), cv::Mat::AUTO_STEP);
    }
    return cv::Mat();   // unsupported format
}
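A usage sketch with the librealsense pipeline API; Pipe is an assumed rs2::pipeline member that has already been started, and PollFrames is not part of the original class.

// Sketch: grab one frameset and convert both streams with Frames2Mat.
// Requires: #include <librealsense2/rs.hpp>
void ARSActor::PollFrames()
{
    rs2::frameset Frames = Pipe.wait_for_frames();

    if (rs2::video_frame Color = Frames.get_color_frame())
    {
        cv::Mat ColorMat = Frames2Mat(Color);   // BGR8/RGB8 -> CV_8UC3 (BGR order)
    }
    if (rs2::depth_frame Depth = Frames.get_depth_frame())
    {
        cv::Mat DepthMat = Frames2Mat(Depth);   // Z16 -> CV_16UC1
    }
    // The returned Mats wrap the frame buffers; clone() them if they must outlive the frameset.
}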