• Python caffe.TEST Examples (Demo)


    Below are six Caffe Python test (caffe.TEST) demos; you can adapt them to your own needs.

    Example 1

    • From project FaceDetection_CNN-master, in source file test.py.
    def convert_full_conv():
        # Load the original network and extract the fully connected layers' parameters.
        net = caffe.Net('deploy.prototxt',
                        'alexNet__iter_60000.caffemodel',
                        caffe.TEST)
        params = ['fc6', 'fc7', 'fc8_flickr']
        fc_params = {pr: (net.params[pr][0].data, net.params[pr][1].data) for pr in params}
        # Load the fully convolutional network to transplant the parameters.
        net_full_conv = caffe.Net('face_full_conv.prototxt',
                                  'alexNet__iter_60000.caffemodel',
                                  caffe.TEST)
        params_full_conv = ['fc6-conv', 'fc7-conv', 'fc8-conv']
        conv_params = {pr: (net_full_conv.params[pr][0].data, net_full_conv.params[pr][1].data) for pr in params_full_conv}
        for pr, pr_conv in zip(params, params_full_conv):
            conv_params[pr_conv][0].flat = fc_params[pr][0].flat  # flat unrolls the arrays
            conv_params[pr_conv][1][...] = fc_params[pr][1]
        net_full_conv.save('face_full_conv.caffemodel')
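
    The saved fully convolutional model can then be run on images of arbitrary size to get a dense score map (the standard Caffe "net surgery" workflow). The following is only a hedged sketch: the test image path and the output blob name "prob" are assumptions, so adjust them to whatever face_full_conv.prototxt actually defines, and add mean subtraction if the original network used it.

    # Hedged usage sketch (assumed paths and blob names), not part of the original project
    import caffe
    net_fc = caffe.Net('face_full_conv.prototxt', 'face_full_conv.caffemodel', caffe.TEST)
    im = caffe.io.load_image('test.jpg')                        # H x W x 3, RGB in [0, 1]
    net_fc.blobs['data'].reshape(1, 3, im.shape[0], im.shape[1])
    transformer = caffe.io.Transformer({'data': net_fc.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))                # HWC -> CHW
    transformer.set_raw_scale('data', 255)                      # [0, 1] -> [0, 255]
    transformer.set_channel_swap('data', (2, 1, 0))             # RGB -> BGR
    net_fc.blobs['data'].data[...] = transformer.preprocess('data', im)
    out = net_fc.forward()
    heatmap = out['prob'][0, 1]   # assumed layout: channel 1 is the per-location face score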
    

    Example 2

    • From project visual-concepts-master, in source file test_model.py.
    def load_model(prototxt_file, model_file, base_image_size, mean, vocab): 
      """
      Load the model from file. Includes pointers to the prototxt file, 
      caffemodel file name, and other settings - image mean, base_image_size, vocab 
      """
      model = {}
      model['net'] = caffe.Net(prototxt_file, model_file, caffe.TEST)
      model['base_image_size'] = base_image_size
      model['means'] = mean
      model['vocab'] = vocab
      return model
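
    For context, a caller might use load_model roughly as in the hedged sketch below; the file names, image size, mean, and vocabulary are placeholders, not values from the original project.

    # Hypothetical usage of load_model (placeholder paths and settings)
    import numpy as np
    vocab = ['cat', 'dog', 'person']                            # placeholder vocabulary
    model = load_model('vgg.prototxt', 'vgg.caffemodel',
                       base_image_size=565,
                       mean=np.array([104.0, 117.0, 123.0]),
                       vocab=vocab)
    net = model['net']   # the underlying caffe.Net, already in caffe.TEST mode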
    

    Example 3

    • From project SketchingAI-master, under directory src, in source file gendraw.py.
    • Caffe only ships with the classification script classify.py; if you want to write your own prediction script (predict.py), this example is a useful reference (see the sketch after it).
    def test_old():
        with open(labelspath,"r") as opened_file:
            labels = opened_file.readlines()
    
        caffe.set_mode_gpu()
        net = caffe.Net(model_file, pretrained, caffe.TEST)
    
        transformer = caffe.io.Transformer({"data": net.blobs["data"].data.shape})
        transformer.set_transpose("data",(2,0,1))
        transformer.set_mean("data",numpy.load(caffe_root+"/python/caffe/imagenet/ilsvrc_2012_mean.npy").mean(1).mean(1))
        transformer.set_raw_scale("data",255)
        transformer.set_channel_swap("data",(2,1,0))
    
        net.blobs["data"].reshape(1,3,227,227)
    
    
        test_image = dataroot+"/homecat.jpg"
        test_image1 = dataroot+"/241.png"
    
        net.blobs["data"].data[...] = transformer.preprocess("data", caffe.io.load_image(test_image1))
        
        out = net.forward()    
        print net.blobs["fc6"].data.shape
    
        prediction = out["prob"]
    
        indices = numpy.argpartition(prediction[0],-10)[-10:]
    
        print prediction[0].argmax(), labels[prediction[0].argmax()]
    
        
        net.blobs["data"].data[...] = transformer.preprocess("data", caffe.io.load_image(test_image))
    
        out = net.forward()    
        print net.blobs["fc6"].data.shape
        
    
        prediction = out["prob"]
    
        indices = numpy.argpartition(prediction[0],-10)[-10:]
    
        print prediction[0].argmax(), labels[prediction[0].argmax()]
    
        for index in indices:
            print labels[index]
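
    Building on the note above about writing a predict.py, the same steps can be wrapped in a standalone helper. This is only a hedged sketch with assumed arguments (model_file, pretrained, mean_file, labels); it is not part of the SketchingAI project.

    # Hedged sketch of a predict() helper (assumed file paths, top-k output)
    import numpy
    import caffe

    def predict(image_path, model_file, pretrained, mean_file, labels, top_k=5):
        caffe.set_mode_cpu()                                    # or caffe.set_mode_gpu()
        net = caffe.Net(model_file, pretrained, caffe.TEST)
        transformer = caffe.io.Transformer({"data": net.blobs["data"].data.shape})
        transformer.set_transpose("data", (2, 0, 1))
        transformer.set_mean("data", numpy.load(mean_file).mean(1).mean(1))
        transformer.set_raw_scale("data", 255)
        transformer.set_channel_swap("data", (2, 1, 0))
        net.blobs["data"].reshape(1, 3, 227, 227)
        net.blobs["data"].data[...] = transformer.preprocess("data", caffe.io.load_image(image_path))
        prob = net.forward()["prob"][0]
        top = prob.argsort()[::-1][:top_k]                      # indices of the top_k classes
        return [(labels[i], float(prob[i])) for i in top]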
    

    Example 4

    • From project fast-rcnn-master, under directory tools, in source file compress_net.py.
    def main():
        args = parse_args()
    
        net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
    
        net_svd = caffe.Net(args.prototxt_svd, args.caffemodel, caffe.TEST)
    
        print('Uncompressed network {} : {}'.format(args.prototxt, args.caffemodel))
        print('Compressed network prototxt {}'.format(args.prototxt_svd))
    
        out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svd'
        out_dir = os.path.dirname(args.caffemodel)
    
        # Compress fc6
        if 'fc6_L' in net_svd.params:
            l_fc6 = net_svd.params['fc6_L'][0].data.shape[0]
            print('  fc6_L bottleneck size: {}'.format(l_fc6))
    
            # uncompressed weights and biases
            W_fc6 = net.params['fc6'][0].data
            B_fc6 = net.params['fc6'][1].data
    
            print('  compressing fc6...')
            Ul_fc6, L_fc6 = compress_weights(W_fc6, l_fc6)
    
            assert(len(net_svd.params['fc6_L']) == 1)
    
            # install compressed matrix factors (and original biases)
            net_svd.params['fc6_L'][0].data[...] = L_fc6
    
            net_svd.params['fc6_U'][0].data[...] = Ul_fc6
            net_svd.params['fc6_U'][1].data[...] = B_fc6
    
            out += '_fc6_{}'.format(l_fc6)
    
        # Compress fc7
        if 'fc7_L' in net_svd.params:
            l_fc7 = net_svd.params['fc7_L'][0].data.shape[0]
            print('  fc7_L bottleneck size: {}'.format(l_fc7))
    
            W_fc7 = net.params['fc7'][0].data
            B_fc7 = net.params['fc7'][1].data
    
            print('  compressing fc7...')
            Ul_fc7, L_fc7 = compress_weights(W_fc7, l_fc7)
    
            assert(len(net_svd.params['fc7_L']) == 1)
    
            net_svd.params['fc7_L'][0].data[...] = L_fc7
    
            net_svd.params['fc7_U'][0].data[...] = Ul_fc7
            net_svd.params['fc7_U'][1].data[...] = B_fc7
    
            out += '_fc7_{}'.format(l_fc7)
    
        filename = '{}/{}.caffemodel'.format(out_dir, out)
        net_svd.save(filename)
        print('Wrote svd model to: {:s}'.format(filename))
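
    compress_weights is defined elsewhere in compress_net.py and is not shown here. To illustrate the kind of factorization it returns (consistent with how the factors are installed above: W is approximated by Ul dotted with L, where L fills the *_L layer and Ul the *_U layer), here is a hedged truncated-SVD sketch, not the project's actual implementation:

    # Hedged sketch only; the real compress_weights in fast-rcnn may differ in details
    import numpy as np

    def compress_weights_sketch(W, l):
        """Factor W (m x n) into Ul (m x l) and L (l x n) so that W ~= Ul.dot(L)."""
        U, s, Vt = np.linalg.svd(W, full_matrices=False)
        Ul = U[:, :l] * s[:l]       # fold the top-l singular values into the left factor
        L = Vt[:l, :]
        return Ul, L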
    

    Example 5

    • From project DIGITS-master, under directory digits/model/tasks, in source file caffe_train.py.
    def get_net(self, epoch=None):
            """
            Returns an instance of caffe.Net
    
            Keyword Arguments:
            epoch -- which snapshot to load (default is -1 to load the most recently generated snapshot)
            """
            if not self.has_model():
                return False
    
            file_to_load = None
    
            if not epoch:
                epoch = self.snapshots[-1][1]
                file_to_load = self.snapshots[-1][0]
            else:
                for snapshot_file, snapshot_epoch in self.snapshots:
                    if snapshot_epoch == epoch:
                        file_to_load = snapshot_file
                        break
            if file_to_load is None:
                raise Exception('snapshot not found for epoch "%s"' % epoch)
    
            # check if already loaded
            if self.loaded_snapshot_file and self.loaded_snapshot_file == file_to_load \
                    and hasattr(self, '_caffe_net') and self._caffe_net is not None:
                return self._caffe_net
    
            if config_value('caffe_root')['cuda_enabled'] and \
                    config_value('gpu_list'):
                caffe.set_mode_gpu()
    
            # load a new model
            self._caffe_net = caffe.Net(
                    self.path(self.deploy_file),
                    file_to_load,
                    caffe.TEST)
    
            self.loaded_snapshot_epoch = epoch
            self.loaded_snapshot_file = file_to_load
    
            return self._caffe_net
    

    Example 6

    • From project DIGITS-master, under directory examples/classification, in source file example.py.
    def get_net(caffemodel, deploy_file, use_gpu=True):
        """
        Returns an instance of caffe.Net
    
        Arguments:
        caffemodel -- path to a .caffemodel file
        deploy_file -- path to a .prototxt file
    
        Keyword arguments:
        use_gpu -- if True, use the GPU for inference
        """
        if use_gpu:
            caffe.set_mode_gpu()
    
        # load a new model
        return caffe.Net(deploy_file, caffemodel, caffe.TEST)
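
    A caller would pair get_net with image preprocessing before running inference. The snippet below is a simplified, hedged usage sketch with placeholder file names; a complete pipeline would also handle mean subtraction, resizing, and batching, which are omitted here.

    # Hypothetical usage of get_net (placeholder paths; preprocessing kept minimal)
    import caffe

    net = get_net('snapshot_iter_630.caffemodel', 'deploy.prototxt', use_gpu=False)
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))                # HWC -> CHW
    image = caffe.io.load_image('test.png')
    net.blobs['data'].data[...] = transformer.preprocess('data', image)
    probs = net.forward()['prob'][0]                            # assumes a softmax output blob named 'prob'
    print('predicted class: %d (%.3f)' % (probs.argmax(), probs.max()))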
    
  • Original source: https://www.cnblogs.com/zhonghuasong/p/7427531.html