• libtorch hands-on code study notes


    #include <iostream>
    #include <memory>
    #include <string>
    #include <torch/script.h>
    #include <opencv2/opencv.hpp>
    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include "opencv2/imgproc/types_c.h"
    
    using namespace std;
    using namespace cv;
    
    int main() {
    	//Reference: https://www.cnblogs.com/yanghailin/p/12901586.html
    
    	////Debugging tips
    	//torch::Tensor box_1 = torch::rand({ 5,4 });
    	//std::cout << box_1 << std::endl; //prints the values as well as the shape
    	//box_1.print();//prints only the shape
    
    	//How to write the CMakeLists.txt (see the sketch below)
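    	////A minimal CMakeLists.txt sketch (project/target names are placeholders; point CMAKE_PREFIX_PATH at your libtorch and OpenCV installs):
    	////  cmake_minimum_required(VERSION 3.0)
    	////  project(libtorch_demo)
    	////  find_package(Torch REQUIRED)
    	////  find_package(OpenCV REQUIRED)
    	////  add_executable(demo main.cpp)
    	////  target_link_libraries(demo ${TORCH_LIBRARIES} ${OpenCV_LIBS})
    	////  set_property(TARGET demo PROPERTY CXX_STANDARD 14)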
    
    	////0.torch::full_like
    	//torch::Tensor tmp_1 = torch::rand({ 2,3 });
    	//torch::Tensor tmp_2 = torch::full_like(tmp_1, 1);
    	//cout << tmp_1 << endl;
    	//cout << tmp_2 << endl;
    
    	////1. Functions for generating data
    	////1.1 torch::rand
    	//torch::Tensor input = torch::rand({ 1,3,2,3 });
    	//cout<<input<<endl;
    
    	////1.2 torch::empty
    	//torch::Tensor a = torch::empty({ 2, 4 });
    	//std::cout << a << std::endl;
    
    	////1.3 torch::ones
    	//torch::Tensor a = torch::ones({ 2, 4 });
    	//std::cout << a << std::endl;
    
    	////1.4 torch::zeros
    	//torch::Tensor scores;
    	//torch::Tensor keep = torch::zeros({ scores.size(0) }).to(torch::kLong).to(scores.device());
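    	////The snippet above assumes scores already holds data; a self-contained version:
    	//torch::Tensor scores = torch::rand({ 4 });
    	//torch::Tensor keep = torch::zeros({ scores.size(0) }).to(torch::kLong).to(scores.device());
    	//std::cout << keep << std::endl;//a length-4 kLong tensor of zeros, on the same device as scores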
    
    	////1.5 torch::full
    	//torch::Tensor num_out = torch::full({ 2,3 }, -2, torch::dtype(torch::kLong));
    	//std::cout << num_out << std::endl;
    
    	////1.6 torch::ones combined with fill_ and moving to CUDA
    	//torch::Tensor a = torch::ones({ 3,2 }).fill_(-8).to(torch::kCUDA);
    	//std::cout << a << std::endl;
    
    	////2. Concatenating tensors with torch::cat, plus combining vector and cat
    	////2.1 Concatenate along columns
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//torch::Tensor b = torch::rand({ 2,1 });
    	//torch::Tensor cat_1 = torch::cat({ a,b }, 1);//concatenate along columns -- the row counts must match
    
    	//std::cout << a << std::endl;
    	//std::cout << b << std::endl;
    	//std::cout << cat_1 << std::endl;
    
    	////2.2 Concatenate along rows
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//torch::Tensor b = torch::rand({ 1,3 });
    	//torch::Tensor cat_1 = torch::cat({ a,b }, 0);
    
    	//std::cout << a << std::endl;
    	//std::cout << b << std::endl;
    	//std::cout << cat_1 << std::endl;
    
    	////2.3 Another example
    	//torch::Tensor box_1 = torch::rand({ 5,4 });
    	//torch::Tensor score_1 = torch::rand({ 5,1 });
    	//torch::Tensor label_1 = torch::rand({ 5,1 });
    	//torch::Tensor result_1 = torch::cat({ box_1,score_1,label_1 }, 1);
    	//result_1.print();
    
    	////2.4 Combining a vector of tensors with cat
    	//torch::Tensor xs_t0 = xs - wh_0 / 2;
    	//torch::Tensor ys_t0 = ys - wh_1 / 2;
    	//torch::Tensor xs_t1 = xs + wh_0 / 2;
    	//torch::Tensor ys_t1 = ys + wh_1 / 2;
    	//xs_t0.print();
    	//ys_t0.print();
    	//xs_t1.print();
    	//ys_t1.print();
    	//vector<torch::Tensor> abce = { xs_t0,ys_t0,xs_t1,ys_t1 };
    	//torch::Tensor bboxes = torch::cat(abce, 2);
    	//std::cout << "-----cat   shape---" << std::endl;
    	//bboxes.print();
    	
    	////This can also be done in one line (a self-contained version of this example follows):
    	//torch::Tensor bboxes = torch::cat({ xs_t0,ys_t0,xs_t1,ys_t1 }, 2);
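    	////A self-contained version of 2.4 (the shapes are my assumption, e.g. {batch, N, 1} as in a detection head):
    	//torch::Tensor xs = torch::rand({ 1,5,1 }), ys = torch::rand({ 1,5,1 });
    	//torch::Tensor wh_0 = torch::rand({ 1,5,1 }), wh_1 = torch::rand({ 1,5,1 });
    	//torch::Tensor bboxes = torch::cat({ xs - wh_0 / 2, ys - wh_1 / 2, xs + wh_0 / 2, ys + wh_1 / 2 }, 2);
    	//bboxes.print();//[Variable[CPUFloatType]{ 1,5,4 }]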
    
    	//3. Slicing operations in torch
    	//select (shallow copy), index_select (deep copy), index (deep copy), slice (shallow copy), narrow, narrow_copy
    	//select [shallow copy]: picks one specified row or column
    	//index [deep copy]: picks specified rows only
    	//index_select [deep copy]: picks multiple specified rows or columns
    	//slice [shallow copy]: a contiguous range of rows or columns
    	//narrow, narrow_copy: a contiguous range of a given length starting at an offset
    
    	////When an op is a shallow copy but you don't want to affect the original, add clone(), e.g.:
    	//torch::Tensor boxes = torch::tensor({ {1,2,3},{4,5,6} });
    	//torch::Tensor x1 = boxes.select(1, 0).clone();
    	//cout << boxes << endl;
    	//cout << x1 << endl;
    
    	////3.1 inline Tensor Tensor::select(int64_t dim, int64_t index) ;
    	////3.1.1 select: take a row
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//std::cout << a << std::endl;
    	//torch::Tensor b = a.select(0, 1);//take row 1
    	//std::cout << b << std::endl;
    
    	////3.1.2 select: take a column
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//std::cout << a << std::endl;
    	//torch::Tensor b = a.select(1, 1);
    	//std::cout << b << std::endl;
    	
    	////3.1.3 select is a shallow copy
    	////Note: this is a shallow copy -- changing b changes a in exactly the same way
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//std::cout << a << std::endl;
    
    	//torch::Tensor b = a.select(1, 1);
    	//std::cout << b << std::endl;
    
    	//b[0] = 0.0;
    	//std::cout << a << std::endl;
    	//std::cout << b << std::endl;
    
    	////3.2 inline Tensor Tensor::index_select(Dimname dim, const Tensor & index)
    	////As before, dim 0 means by row and 1 by column; index holds the row or column numbers to take, and oddly it must be toType(torch::kLong).
    	////Another gotcha: loading idx from an int array gave all zeros. The cause: from_blob defaults to kFloat, so the raw int bits are
    	////reinterpreted as (near-zero) floats; pass torch::kInt to from_blob and then convert to kLong.
    	//torch::Tensor a = torch::rand({ 2,6 });
    	//std::cout << a << std::endl;
    	//torch::Tensor idx = torch::empty({ 4 }).toType(torch::kLong);
    	//idx[0] = 0;
    	//idx[1] = 2;
    	//idx[2] = 4;
    	//idx[3] = 1;
    
    	////int idx_data[4] = {1,3,2,4};
    	////torch::Tensor idx = torch::from_blob(idx_data,{4}).toType(torch::kLong);//idx comes out all zeros -- use torch::from_blob(idx_data, {4}, torch::kInt).toType(torch::kLong) instead
    
    	//std::cout << idx << std::endl;
    	//torch::Tensor b = a.index_select(1, idx);
    	//std::cout << b << std::endl;
    
    	////3.2.2 index_select [deep copy]
    	//torch::Tensor a = torch::rand({ 2,6 });
    	//std::cout << a << std::endl;
    	//torch::Tensor idx = torch::empty({ 4 }).toType(torch::kLong);
    	//idx[0] = 0;
    	//idx[1] = 2;
    	//idx[2] = 4;
    	//idx[3] = 1;
    
    	////int idx_data[4] = {1,3,2,4};
    	////torch::Tensor idx = torch::from_blob(idx_data,{4}, torch::kInt).toType(torch::kLong);
    
    	//std::cout << idx << std::endl;
    
    	//torch::Tensor b = a.index_select(1, idx);
    	//std::cout << b << std::endl;
    
    	//b[0][0] = 0.0;
    	//std::cout << a << std::endl;
    	//std::cout << b << std::endl;
    
    	////3.3 index inline Tensor Tensor::index(TensorList indices)
    	////In my experiments this function can only select rows, and it is a deep copy
    	//torch::Tensor a = torch::rand({ 2,6 });
    	//std::cout << a << std::endl;
    
    	//torch::Tensor idx_1 = torch::empty({ 2 }).toType(torch::kLong);
    	//idx_1[0] = 0;
    	//idx_1[1] = 1;
    
    	//torch::Tensor bb = a.index(idx_1);
    	//bb[0][0] = 0;
    
    	//std::cout << bb << std::endl;
    	//std::cout << a << std::endl;
    
    	////3.4 slice inline Tensor Tensor::slice(int64_t dim, int64_t start, int64_t end, int64_t step)
    	////dim 0 selects rows, 1 selects columns, from start up to but not including end. As the result shows, it is a shallow copy!
    	//torch::Tensor a = torch::rand({ 2,6 });
    	//std::cout << a << std::endl;
    
    	//torch::Tensor b = a.slice(0, 0, 1);
    	//torch::Tensor c = a.slice(1, 0, 3);
    
    	//b[0][0] = 0.0;
    	//std::cout << b << std::endl;
    	//std::cout << c << std::endl;
    	//std::cout << a << std::endl;
    	
    	////3.5 narrow narrow_copy
    	////inline Tensor Tensor::narrow(int64_t dim, int64_t start, int64_t length) const
    	////inline Tensor Tensor::narrow_copy(int64_t dim, int64_t start, int64_t length) const
    	//torch::Tensor a = torch::rand({ 4,6 });
    	//torch::Tensor b = a.narrow(0, 1, 2);
    	//torch::Tensor c = a.narrow_copy(0, 1, 2);
    
    	//std::cout << a << std::endl;
    	//std::cout << b << std::endl;
    	//std::cout << c << std::endl;
    
    	//4. squeeze() unsqueeze()
    	//inline Tensor Tensor::squeeze() const //no argument: squeezes out every dimension of size 1
    	//inline Tensor Tensor::squeeze(int64_t dim) const //with argument: squeezes the specified dimension
    	//inline Tensor& Tensor::squeeze_() const //the trailing _ means in-place
    	//inline Tensor& Tensor::squeeze_(int64_t dim) const //in-place, specified dimension
    	
    	//4.1 squeeze(): a shape { 2,1,2,1 } tensor becomes { 2,2 }, reproduced in the sketch below
    	//[Variable[CPUFloatType]{ 2,1,2,1 }]
    	//[Variable[CPUFloatType]{ 2,2 }]
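    	////A minimal sketch reproducing the shapes above:
    	//torch::Tensor a = torch::rand({ 2,1,2,1 });
    	//a.print();//[Variable[CPUFloatType]{ 2,1,2,1 }]
    	//torch::Tensor b = a.squeeze();
    	//b.print();//[Variable[CPUFloatType]{ 2,2 }]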
    
    	////4.2 squeeze(int64_t dim): specify which dimension to squeeze
    	//torch::Tensor a = torch::rand({ 1,1,3 });
    	//std::cout << a << std::endl;
    
    	//torch::Tensor b = a.squeeze();
    	//std::cout << b << std::endl;
    
    	//torch::Tensor c = a.squeeze(0);
    	//std::cout << c << std::endl;
    
    	//torch::Tensor d = a.squeeze(1);
    	//std::cout << d << std::endl;
    
    	//torch::Tensor e = a.squeeze(2);
    	//std::cout << e << std::endl;
    
    	////4.3 unsqueeze
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//std::cout << a << std::endl;
    
    	//torch::Tensor b = a.unsqueeze(0);
    	//std::cout << b << std::endl;
    
    	//torch::Tensor bb = a.unsqueeze(1);
    	//std::cout << bb << std::endl;
    
    	//torch::Tensor bbb = a.unsqueeze(2);
    	//std::cout << bbb << std::endl;
    
    	////5. torch::nonzero: outputs the coordinates of the non-zero elements
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//a[0][1] = 0;
    	//a[1][2] = 0;
    	//std::cout << a << std::endl;
    	//torch::Tensor b = torch::nonzero(a);
    	//std::cout << b << std::endl;
    	
    	////6. Accessing tensor values: a.item() converts a 1 x 1 tensor a to a scalar such as float
    	////To take a single value out of a tensor as an int or float ===>>> auto bbb = a[1][1].item().toFloat();
    	////Normally you can just index with subscripts, e.g. a[0][1], but the result is still a tensor; to get a C++ int or float, do the following:
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//std::cout << a << std::endl;
    	//auto bbb = a[1][1].item().toFloat();
    	////auto bbb = a.item().toFloat();
    	//std::cout << bbb << std::endl;
    
    	////6.1 An example of torch::sort combined with item()
    	////CAFFE2_API std::tuple<Tensor,Tensor> sort(const Tensor & self, int64_t dim=-1, bool descending=false);
    	//torch::Tensor scores = torch::rand({10});
    	//std::tuple<torch::Tensor, torch::Tensor> sort_ret = torch::sort(scores.unsqueeze(1), 0, 1);
    	//torch::Tensor v = std::get<0>(sort_ret).squeeze(1).to(scores.device());//std::get<> is how tuple elements are accessed
    	//torch::Tensor idx = std::get<1>(sort_ret).squeeze(1).to(scores.device());
    	//std::cout << scores << std::endl;
    	//std::cout << v << std::endl;
    	//std::cout << idx << std::endl;
    
    	//for (int i = 0; i < 10; i++)
    	//{
    	//	int idx_1 = idx[i].item<int>();
    	//	float s = v[i].item<float>();
    
    	//	std::cout << idx_1 << "  " << s << std::endl;
    	//}
    	
    	////7. Converting an OpenCV Mat (or other vector/array data) to a tensor
    	////7.1
    	//string path = "";
    	//Mat m_out = imread(path);//[320,320,3]
    	//torch::Tensor input_tensor = torch::from_blob(m_out.data,{320,320,3 }).toType(torch::kFloat32);//torch::kByte is a big pitfall here
    	//input_tensor = input_tensor.permute({ 2,0,1 });
    	//input_tensor = input_tensor.unsqueeze(0);
    	//input_tensor = input_tensor.to(torch::kFloat).to(torch::kCPU);
    
    	////Careful here: the image above was mean-subtracted in preprocessing, so m_out contains negative pixel values. Reading them as
    	////torch::kByte would turn the negatives into positive values, so torch::kFloat32 is required.
    
    	////7.2
    	//std::vector<float> region_priors;
    	////region_priors.push_back(num);  region_priors holds 6375 x 4 values
    	//torch::Tensor m_prior = torch::from_blob(region_priors.data(), { 6375,4 }).cuda();
    
    	////8. Tensor sizes: size(), sizes(), numel()
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//std::cout << a << std::endl;
    
    	//auto aa = a.size(0); 
    	//auto bb = a.size(1);
    	//auto a_size = a.sizes();
    	//std::cout << aa << std::endl;
    	//std::cout << bb << std::endl;
    	//std::cout << a_size << std::endl;
    
    	//int num_ = a.numel();//number of elements
    	//std::cout << num_ << std::endl;
    
    	//9. torch::sort
    	//static inline std::tuple<Tensor, Tensor> sort(const Tensor& self, Dimname dim, bool descending)
    	//dim 0 means along rows, 1 along columns; descending = false sorts ascending, true sorts descending.
    	//Returns a tuple: the first element is the sorted values, the second is each value's index in the original tensor.
    
    	//torch::Tensor scores = torch::rand({ 10 });
    	//std::tuple<torch::Tensor, torch::Tensor> sort_ret = torch::sort(scores.unsqueeze(1), 0, 1);
    	//torch::Tensor v = std::get<0>(sort_ret).squeeze(1).to(scores.device());
    	//torch::Tensor idx = std::get<1>(sort_ret).squeeze(1).to(scores.device());
    	//std::cout << scores << std::endl;
    	//std::cout << v << std::endl;
    	//std::cout << idx << std::endl;
    	
    	////10. clamp: constrain values to [min, max]; anything below min becomes min, anything above max becomes max
    	////inline Tensor Tensor::clamp(c10::optional min, c10::optional max) const
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//a[0][0] = 20;
    	//a[0][1] = 21;
    	//a[0][2] = 22;
    	//a[1][0] = 23;
    	//a[1][1] = 24;
    	//std::cout << a << std::endl;
    
    	//torch::Tensor b = a.clamp(21, 22);
    	//std::cout << b << std::endl;
    
    	////11. The > and < operators compute a mask!
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//std::cout << a << std::endl;
    	//torch::Tensor b = a > 0.5;
    	//std::cout << b << std::endl;
    
    	//12. Transpose: Tensor::transpose operates on exactly 2 dimensions!
    	////inline Tensor Tensor::transpose(Dimname dim0, Dimname dim1) const
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//std::cout << a << std::endl;
    
    	//torch::Tensor b = a.transpose(1, 0);
    	//std::cout << b << std::endl;
    	
    	////13.expand_as
    	////inline Tensor Tensor::expand_as(const Tensor & other) const
    	//torch::Tensor a = torch::rand({ 2,3 });
    	////torch::Tensor b = torch::ones({2,2});
    	//torch::Tensor b = torch::ones({ 2,1 });
    	//torch::Tensor c = b.expand_as(a);
    
    	//std::cout << a << std::endl;
    	//std::cout << b << std::endl;
    	//std::cout << c << std::endl;
    	////Note the shape requirements: writing torch::Tensor b = torch::ones({2,2}); or torch::Tensor b = torch::ones({2}); both raise errors here!
    
    	////14. Multiply mul_, divide div, subtract sub_ (a self-contained sketch follows)
    	//boxes_my.select(1, 0).mul_(width);
    	//boxes_my.select(1, 1).mul_(height);
    	//boxes_my.select(1, 2).mul_(width);
    	//boxes_my.select(1, 3).mul_(height);
    
    	//prediction.select(2, 3).div(2);//note: div returns a new tensor and leaves prediction unchanged; use div_ for in-place
    
    	//input_tensor[0][0] = input_tensor[0][0].sub_(0.485).div_(0.229);
    	//input_tensor[0][1] = input_tensor[0][1].sub_(0.456).div_(0.224);
    	//input_tensor[0][2] = input_tensor[0][2].sub_(0.406).div_(0.225);
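    	////The fragments above come from a detection model; a self-contained sketch of the same in-place ops:
    	//torch::Tensor t = torch::ones({ 2,3 });
    	//t.select(1, 0).mul_(10);//scale column 0 in place (select is a shallow copy, so t itself changes)
    	//t.sub_(0.5).div_(0.25);//in-place subtract then divide, chained
    	//std::cout << t << std::endl;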
    
    	////15. Loading a model
    	//torch::Device m_device(torch::kCUDA);
    	//torch::jit::script::Module m_model = torch::jit::load(path_pt);
    	//m_model.to(m_device);
    	//m_model.eval();
    	
    	////16. The results coming out of the model's forward()
    	//auto output = m_model.forward({ input_tensor });
    	//auto tpl = output.toTuple();
    	//auto arm_loc = tpl->elements()[0].toTensor();
    	////arm_loc.print();
    	////std::cout<<arm_loc[0]<<std::endl;
    	//auto arm_conf = tpl->elements()[1].toTensor();
    	////arm_conf.print();
    	//auto odm_loc = tpl->elements()[2].toTensor();
    	////odm_loc.print();
    	////std::cout<<odm_loc[0]<<std::endl;
    	//auto odm_conf = tpl->elements()[3].toTensor();
    	////odm_conf.print();
    
    	////17. resize_ and zero_
    	////Tensor & resize_(IntArrayRef size) const;
    	////Tensor& zero_() const;
    	//torch::Tensor a = torch::rand({ 1,3,2,2 });
    	//const int batch_size = a.size(0);
    	//const int depth = a.size(1);
    	//const int image_height = a.size(2);
    	//const int image_width = a.size(3);
    
    	//torch::Tensor crops = torch::rand({ 1,3,2,2 });
    	////torch::Tensor crops;
    	//crops.resize_({ batch_size, depth, image_height, image_width });
    	//crops.zero_();
    
    	//std::cout << a << std::endl;
    	//std::cout << crops << std::endl;
    	////Note: if crops is declared as just torch::Tensor crops; (instead of torch::Tensor crops = torch::rand({1,3,2,2});)
    	////this errors out. It seems the tensor must be initialized first so that memory is allocated, otherwise resize_/zero_ fail!
    
    	////18. meshgrid: expands 1-D tensors into coordinate grids
    	////static inline std::vector<Tensor> meshgrid(TensorList tensors)
    	//torch::Tensor scales = torch::ones({ 2 });
    	//torch::Tensor ratios = torch::ones({ 2 });
    	//ratios += 2;
    
    	//std::cout << scales << std::endl;
    	//std::cout << ratios << std::endl;
    
    	//std::vector<torch::Tensor> mesh = torch::meshgrid({ scales, ratios });
    
    	//torch::Tensor scales_1 = mesh[0];
    	//torch::Tensor ratios_1 = mesh[1];
    
    	//std::cout << scales_1 << std::endl;
    	//std::cout << ratios_1 << std::endl;
    
    	////19. flatten: flatten a tensor
    	////Tensor flatten(int64_t start_dim = 0, int64_t end_dim = -1) const;
    	////Tensor flatten(int64_t start_dim, int64_t end_dim, Dimname out_dim) const;
    	////Tensor flatten(Dimname start_dim, Dimname end_dim, Dimname out_dim) const;
    	////Tensor flatten(DimnameList dims, Dimname out_dim) const;
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//torch::Tensor b = a.flatten();
    	//std::cout << a << std::endl;
    	//std::cout << b << std::endl;
    	
    	////20. fill_: fill a tensor with a value, in place (modifies the current tensor)
    	////Tensor& fill_(Scalar value) const;
    	////Tensor& fill_(const Tensor& value) const;
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//torch::Tensor b = a.fill_(4);
    
    	//std::cout << a << std::endl;
    	//std::cout << b << std::endl;
    
    	////21.torch::stack
    	////static inline Tensor stack(TensorList tensors, int64_t dim)
    	//torch::Tensor a = torch::rand({ 3 });
    	//torch::Tensor b = torch::rand({ 3 });
    	//torch::Tensor c = torch::stack({ a,b }, 1);
    
    	//std::cout << a << std::endl;
    	//std::cout << b << std::endl;
    	//std::cout << c << std::endl;
    
    	////22.reshape
    	//torch::Tensor a = torch::rand({ 2,4 });
    	//torch::Tensor b = a.reshape({ -1,2 });
    	//std::cout << a << std::endl;
    	//std::cout << b << std::endl;
    
    	////23.view
    	////inline Tensor Tensor::view(IntArrayRef size) const
    	////view needs contiguous memory first: a.contiguous().view({ -1, 4 });
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//torch::Tensor b = a.contiguous().view({ -1, 6 });
    	//torch::Tensor c = a.contiguous().view({ 3, 2 });
    
    	//std::cout << a << std::endl;
    	//std::cout << b << std::endl;
    	//std::cout << c << std::endl;
    
    	////24. argmax / argmin -- the dim argument works the opposite of what you might expect: dim 0 reduces over rows, producing one index per column
    	////static inline Tensor argmax(const Tensor& self, c10::optional<int64_t> dim = c10::nullopt, bool keepdim = false);
    	////static inline Tensor argmin(const Tensor& self, c10::optional<int64_t> dim = c10::nullopt, bool keepdim = false);
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//auto b = torch::argmax(a, 0);
    
    	//std::cout << a << std::endl;
    	//std::cout << b << std::endl;
    
    	////25.where
    	////static inline Tensor where(const Tensor& condition, const Tensor& self, const Tensor& other);
    	////static inline std::vector<Tensor> where(const Tensor& condition);
    	////torch::Tensor d = torch::where(a > 0.5, b, c);
    	////Explanation: wherever a > 0.5, d takes the value from b at that position; everywhere else d takes the value from c
    	////25.1
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//torch::Tensor b = torch::ones({ 2,3 });
    	//torch::Tensor c = torch::zeros({ 2,3 });
    
    	//torch::Tensor d = torch::where(a > 0.5, b, c);
    	//std::cout << a << std::endl;
    	//std::cout << b << std::endl;
    	//std::cout << c << std::endl;
    	//std::cout << d << std::endl;
    
    	////25.2
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//auto b = torch::where(a > 0.5);//returns a std::vector<Tensor>: one index tensor per dimension
    
    	//std::cout << a << std::endl;
    	//std::cout << b[0] << std::endl;//row indices of the hits (a vector cannot be streamed directly)
    	//std::cout << b[1] << std::endl;//column indices of the hits
    
    	////26. accessor -- feels somewhat like .item()?
    	////TensorAccessor<T, N> accessor() const&
    	////auto result_data = result.accessor<float, 2>(); //2 means two-dimensional
    	////26.1
    	//torch::Tensor one = torch::randn({ 9,6 });
    	//auto foo_one = one.accessor<float, 2>();
    	//for (int i = 0, sum = 0; i < foo_one.size(0); i++)
    	//	for (int j = 0; j < foo_one.size(1); j++)
    	//		sum += foo_one[i][j];
    	//
    	////26.2
    	//torch::Tensor result;
    	//for (int i = 1; i < m_num_class; i++)
    	//{
    	//	//...
    	//	if (0 == result.numel())
    	//	{
    	//		result = result_.clone();
    	//	}
    	//	else
    	//	{
    	//		result = torch::cat({ result,result_ }, 0);//concatenate along rows
    	//	}
    	//}
    	//result = result.cpu();
    	//auto result_data = result.accessor<float, 2>();
    
    	//cv::Mat img_draw = img.clone();
    	//for (int i = 0; i < result_data.size(0); i++)
    	//{
    	//	float score = result_data[i][4];
    	//	if (score < 0.4) { continue; }
    	//	int x1 = result_data[i][0];
    	//	int y1 = result_data[i][1];
    	//	int x2 = result_data[i][2];
    	//	int y2 = result_data[i][3];
    	//	int id_label = result_data[i][5];
    
    	//	cv::rectangle(img_draw, cv::Point(x1, y1), cv::Point(x2, y2), cv::Scalar(255, 0, 0), 3);
    	//	cv::putText(img_draw, label_map[id_label], cv::Point(x1, y2), CV_FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(255, 0, 55));
    	//}
    	
    	////27. torch::max and torch::min
    	////static inline std::tuple<Tensor,Tensor> max(const Tensor & self, Dimname dim, bool keepdim=false);
    	////static inline Tensor max(const Tensor& self);
    	////27.1 max/min along a given dimension
    	//torch::Tensor a = torch::rand({ 4,2 });
    	//std::tuple<torch::Tensor, torch::Tensor> max_test = torch::max(a, 1);
    	//auto max_val = std::get<0>(max_test);
    	//// index
    	//auto index = std::get<1>(max_test);
    
    	//std::cout << a << std::endl;
    	//std::cout << max_val << std::endl;
    	//std::cout << index << std::endl;
    
    	////27.2 Global max
    	//torch::Tensor a = torch::rand({ 4,2 });
    	//torch::Tensor max_test = torch::max(a);
    
    	//std::cout << a << std::endl;
    	//std::cout << max_test << std::endl;
    
    	////28. masked_select and masked_fill
    	////28.1 Tensor masked_select(const Tensor & mask) const;
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//torch::Tensor c = (a > 0.25);
    	//torch::Tensor d = a.masked_select(c);
    
    	//std::cout << a << std::endl;
    	//std::cout << c << std::endl;
    	//std::cout << d << std::endl;
    
    	////28.2 Tensor masked_fill(const Tensor & mask, Scalar value) const;
    	////Tensor& masked_fill_(const Tensor& mask, const Tensor& value) const;
    	////Tensor masked_fill(const Tensor& mask, const Tensor& value) const;
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//torch::Tensor aa = a.clone();
    	//aa.masked_fill_(aa > 0.5, -2);
    
    	//std::cout << a << std::endl;
    	//std::cout << aa << std::endl;
    
    	////28.3 masked_fill_ -- everything with a trailing underscore is an in-place operation
    	//float index[] = { 3,2,3,3,5,6,7,8,9,10,11,12,13,14,15,16 };
    	//float score[] = { 0.1,0.1,0.9,0.9,0.9,0.1,0.1,0.1,0.1,0.1,0.8,0.8,0.8,0.8,0.8,0.8 };
    
    	//torch::Tensor aa = torch::from_blob(index, { 4,4 }).toType(torch::kFloat32);
    	//torch::Tensor bb = torch::from_blob(score, { 4,4 }).toType(torch::kFloat32);
    	//std::cout << aa << std::endl;
    	//std::cout << bb << std::endl;
    
    	//torch::Tensor tmp = (aa == 3);
    	//torch::Tensor tmp_2 = (bb >= 0.9);
    	//std::cout << tmp << std::endl;
    	//std::cout << tmp_2 << std::endl;
    
    	//torch::Tensor condition_111 = tmp * tmp_2;
    	//std::cout << condition_111 << std::endl;
    	//aa.masked_fill_(condition_111, -1);
    	//std::cout << aa << std::endl;
    
    	////29. A full libtorch example 1: classification deployment?
    	//torch::jit::script::Module module = torch::jit::load(argv[1]);
    	//std::cout << "== Switch to GPU mode" << std::endl;
    	//// to GPU
    	//module.to(at::kCUDA);
    
    	//if (LoadImage(file_name, image)) {
    	//	auto input_tensor = torch::from_blob(image.data, { 1, kIMAGE_SIZE, kIMAGE_SIZE, kCHANNELS });
    	//	input_tensor = input_tensor.permute({ 0, 3, 1, 2 });
    	//	input_tensor[0][0] = input_tensor[0][0].sub_(0.485).div_(0.229);
    	//	input_tensor[0][1] = input_tensor[0][1].sub_(0.456).div_(0.224);
    	//	input_tensor[0][2] = input_tensor[0][2].sub_(0.406).div_(0.225);
    
    	//	// to GPU
    	//	input_tensor = input_tensor.to(at::kCUDA);
    
    	//	torch::Tensor out_tensor = module.forward({ input_tensor }).toTensor();
    
    	//	auto results = out_tensor.sort(-1, true);
    	//	auto softmaxs = std::get<0>(results)[0].softmax(0);
    	//	auto indexs = std::get<1>(results)[0];
    
    	//	for (int i = 0; i < kTOP_K; ++i) {
    	//		auto idx = indexs[i].item<int>();
    	//		std::cout << "    ============= Top-" << i + 1
    	//			<< " =============" << std::endl;
    	//		std::cout << "    Label:  " << labels[idx] << std::endl;
    	//		std::cout << "    With Probability:  "
    	//			<< softmaxs[i].item<float>() * 100.0f << "%" << std::endl;
    	//	}
    	//}
    
    	////30. PyTorch NMS <---------> libtorch NMS (the original omits the code; a sketch follows)
    	////...
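    	////A minimal greedy-NMS sketch in libtorch (my own, not the referenced blog's code; boxes is {N,4} in xyxy order, scores is {N};
    	////as a free function it would live outside main):
    	//torch::Tensor nms(const torch::Tensor& boxes, const torch::Tensor& scores, float thresh) {
    	//	auto x1 = boxes.select(1, 0), y1 = boxes.select(1, 1), x2 = boxes.select(1, 2), y2 = boxes.select(1, 3);
    	//	torch::Tensor areas = (x2 - x1) * (y2 - y1);
    	//	torch::Tensor order = std::get<1>(torch::sort(scores, 0, true));//indices, highest score first
    	//	std::vector<int64_t> keep;
    	//	while (order.numel() > 0) {
    	//		int64_t i = order[0].item<int64_t>();
    	//		keep.push_back(i);
    	//		if (order.numel() == 1) break;
    	//		order = order.slice(0, 1, order.numel());//drop the box just kept
    	//		torch::Tensor xx1 = torch::clamp_min(x1.index_select(0, order), x1[i].item<float>());
    	//		torch::Tensor yy1 = torch::clamp_min(y1.index_select(0, order), y1[i].item<float>());
    	//		torch::Tensor xx2 = torch::clamp_max(x2.index_select(0, order), x2[i].item<float>());
    	//		torch::Tensor yy2 = torch::clamp_max(y2.index_select(0, order), y2[i].item<float>());
    	//		torch::Tensor inter = torch::clamp_min(xx2 - xx1, 0) * torch::clamp_min(yy2 - yy1, 0);
    	//		torch::Tensor iou = inter / (areas[i] + areas.index_select(0, order) - inter);
    	//		order = order.masked_select(iou <= thresh);//keep only boxes that overlap little with box i
    	//	}
    	//	return torch::from_blob(keep.data(), { (int64_t)keep.size() }, torch::kLong).clone();
    	//}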
    	
    	////31. The data type matters a lot! .to(torch::kByte);
    	////31.1
    	////[128,512]
    	//torch::Tensor b = torch::argmax(output_1, 2).cpu().to(torch::kByte);//argmax returns kLong; convert to kByte before wrapping it in the CV_8UC1 Mat below
    	////std::cout<<b<<std::endl;
    	//b.print();
    
    	//cv::Mat mask(T_height, T_width, CV_8UC1, (uchar*)b.data_ptr());
    	//imshow("mask", mask * 255);
    	//waitKey(0);
    	
    	////31.2 Intermediate image Mats must be converted to float32
    	//Mat m_tmp = grayMat.clone();
    	//m_tmp.convertTo(m_tmp, CV_32FC1);/////another big pitfall -- convert the image to float32 first
    	//torch::Tensor label_deal = torch::from_blob(
    	//m_tmp.data, { grayMat.rows, grayMat.cols }).toType(torch::kByte).to(m_device);
    
    	////32. Accessing tensor data through a pointer
    	////32.1
    	//torch::Tensor output = m_model->forward({ input_tensor }).toTensor()[0];
    	//torch::Tensor output_cpu = output.cpu();
    	////output_cpu     Variable[CPUFloatType] [26, 480, 480]]
    	//output_cpu.print();
    
    	//void* ptr = output_cpu.data_ptr();
    	////std::cout<<(float*)ptr[0]<<std::endl;
    	////The pointer can only be declared void* or auto, otherwise it won't compile. For example float* ptr = output_cpu.data_ptr(); gives:
    	////	error : invalid conversion from ‘void*’ to ‘float*’ [-fpermissive]
    	////	float* ptr = output_cpu.data_ptr();
    	////void* compiles, but it must be cast before the tensor data can be accessed through it -- see 32.3.
    	
    	////32.2
    	//torch::Tensor output = m_model->forward({ input_tensor }).toTensor()[0];
    	//torch::Tensor output_cpu = output.cpu();
    	////output_cpu     Variable[CPUFloatType] [26, 480, 480]]
    	//output_cpu.print();
    	//void* ptr = output_cpu.data_ptr();
    	//std::cout << (float*)ptr << std::endl;//prints the address
    
    	////32.3 Remember to cast the void* before doing pointer arithmetic!
    	//void* ptr = output_cpu.data_ptr();
    	////        std::cout<<*((float*)ptr[0][0][0])<<std::endl;
    	////        std::cout<<(float*)ptr[0][0][0]<<std::endl;
    
    	//std::cout << *((float*)ptr + 2) << std::endl;//element at offset 2; cast first -- arithmetic directly on a void* is not valid standard C++
    
    	////32.3.1
    	//const float* result = reinterpret_cast<const float*>(output_cpu.data_ptr());
    	////32.3.2
    	//void* ptr = output_cpu.data_ptr();
    	//const float* result = (float*)ptr;
    	
    	////44. Returning multiple tensors (PyTorch side) and unpacking multiple tensors (libtorch side)!
    	////Output on the PyTorch side:
    	//def forward(self, x, batch = None):
    	//	output, cnn_feature = self.dla(x)
    	//	return (output['ct_hm'], output['wh'], cnn_feature)
    
    	////The corresponding libtorch side:
    	//auto out = m_model->forward({ input_tensor });
    	//auto tpl = out.toTuple();
    	//auto out_ct_hm = tpl->elements()[0].toTensor();
    	//out_ct_hm.print();
    	//auto out_wh = tpl->elements()[1].toTensor();
    	//out_wh.print();
    	//auto out_cnn_feature = tpl->elements()[2].toTensor();
    	//out_cnn_feature.print();
    	
    	////If the model returns a single tensor, it is simply:
    	//at::Tensor output = module->forward(inputs).toTensor();
    
    	////45. When a torch::Tensor is a function parameter, whether or not it is passed by reference, in-place operations on the parameter affect the original tensor -- tensors always behave like references (sketch below)
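    	////A minimal sketch of 45 (the lambda and its name are my own):
    	//auto scale2x = [](torch::Tensor t) { t.mul_(2.0); };//takes the tensor by value...
    	//torch::Tensor a = torch::ones({ 2,2 });
    	//scale2x(a);
    	//std::cout << a << std::endl;//...yet a is now all 2s: the by-value copy shares the same storage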
    	//
    	////46. Reproducing PyTorch's subscript magic. Note: libtorch does not support Python-style subscript slicing; use select instead (sketch below)
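    	////A minimal sketch of 46 (taking the PyTorch expression a[:, 1] as the example):
    	//torch::Tensor a = torch::rand({ 3,4 });
    	//torch::Tensor col = a.select(1, 1);//equivalent to a[:, 1] (shallow copy)
    	//std::cout << a << std::endl;
    	//std::cout << col << std::endl;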
    	//
    	////49.torch.gather
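    	////The original leaves 49 empty; a minimal torch::gather sketch (gathers one value per row at a given column index):
    	//torch::Tensor a = torch::rand({ 2,3 });
    	//torch::Tensor idx = torch::tensor({ {0},{2} }).toType(torch::kLong);
    	//torch::Tensor b = torch::gather(a, 1, idx);//b[i][0] = a[i][idx[i][0]]
    	//std::cout << a << std::endl;
    	//std::cout << b << std::endl;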
    
    	////50. torch::argsort is simply the second return value of torch::sort (sketch below)
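    	////A minimal argsort sketch:
    	//torch::Tensor a = torch::rand({ 5 });
    	//torch::Tensor idx = torch::argsort(a, 0, false);//ascending; same as std::get<1>(torch::sort(a, 0, false))
    	//std::cout << a << std::endl;
    	//std::cout << idx << std::endl;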
    	//
    	////51. Checking whether a tensor is empty: ind_mask.sizes().empty() (sketch below)
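    	////A small sketch of 51 -- note sizes().empty() is true only for 0-dimensional tensors; numel() == 0 is the usual emptiness check:
    	//torch::Tensor a = torch::rand({ 0 });
    	//std::cout << (a.numel() == 0) << std::endl;//1: no elements
    	//std::cout << a.sizes().empty() << std::endl;//0: sizes() is {0}, which still has one entry
    	//torch::Tensor b = torch::rand({ 2,3 }).sum();//sum() over all dims yields a 0-dim tensor
    	//std::cout << b.sizes().empty() << std::endl;//1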
    	
    	////52. The PyTorch code out = aim[ind_mask], written in libtorch:
    	//torch::Tensor a = torch::rand({ 5,3,2 });
    	//torch::Tensor idx = torch::zeros({ 5 }).toType(torch::kLong);
    	//idx[3] = 1;
    	//idx[1] = 1;
    
    	//torch::Tensor abc = torch::nonzero(idx);
    	//torch::Tensor b = a.index_select(0, abc.squeeze());
    
    	//std::cout << a << std::endl;
    	//std::cout << abc << std::endl;
    	//std::cout << b << std::endl;
    
    	////53. How to express the PyTorch code a4 = arr[...,3,0] in libtorch -- a use for masked_select! (sketch below)
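    	////A minimal sketch of 53 via chained select (one way to express arr[..., 3, 0]; masked_select is another route):
    	//torch::Tensor arr = torch::rand({ 2,5,4 });
    	//torch::Tensor a4 = arr.select(2, 0).select(1, 3);//take index 0 on the last dim, then index 3 on the middle dim
    	//std::cout << a4 << std::endl;//shape {2}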
    	
    
    	return 0;
    }
    

    Reference:

    https://www.cnblogs.com/yanghailin/p/12901586.html (libtorch common API examples -- billed as the most complete and detailed collection)

  • Original post: https://www.cnblogs.com/liutianrui1/p/13926355.html