https://www.cnblogs.com/psklf/p/7700834.html
https://stackoverflow.com/questions/16475737/convert-uiimage-to-cmsamplebufferref
// From the Stack Overflow answer above: create a CGImageRef from sample buffer data.
// Assumes the buffer holds non-planar 32BGRA pixels (configure the capture output with
// kCVPixelFormatType_32BGRA). Requires CoreMedia, CoreVideo and CoreGraphics.
- (CGImageRef)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer
{
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(imageBuffer, 0); // lock the buffer while we read its pixels

    // Use CVPixelBufferGetBaseAddress for this non-planar BGRA case; the per-plane
    // variant (CVPixelBufferGetBaseAddressOfPlane) is meant for planar formats such as YUV.
    uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);

    // Get the buffer's dimensions and layout
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);

    // Draw the raw bytes into a bitmap context and snapshot it as a CGImage
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow,
                                                    colorSpace,
                                                    kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    CGImageRef newImage = CGBitmapContextCreateImage(newContext);
    CGContextRelease(newContext);
    CGColorSpaceRelease(colorSpace);

    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    /* CVBufferRelease(imageBuffer); */ // do not call this! the sample buffer owns imageBuffer

    return newImage; // returned at +1: the caller must CGImageRelease() it
}
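The method above only works if the camera actually delivers non-planar 32BGRA frames. A minimal usage sketch (my addition, not part of the answer), assuming an AVCaptureVideoDataOutput configured for BGRA:

#import <AVFoundation/AVFoundation.h>

// When configuring the capture session, force BGRA so the method's assumption holds:
AVCaptureVideoDataOutput *videoOutput = [[AVCaptureVideoDataOutput alloc] init];
videoOutput.videoSettings =
    @{ (id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA) };

// Then, in the AVCaptureVideoDataOutputSampleBufferDelegate callback:
- (void)captureOutput:(AVCaptureOutput *)output
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    CGImageRef cgImage = [self imageFromSampleBuffer:sampleBuffer];
    UIImage *image = [UIImage imageWithCGImage:cgImage];
    CGImageRelease(cgImage); // balance the +1 reference from imageFromSampleBuffer:
    // ... use image; do not keep sampleBuffer itself beyond this call ...
}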
From the same thread (Brad Larson): a function used in the GPUImage framework to resize an incoming camera frame and wrap the scaled result in a new CMSampleBufferRef. It expects a BGRA frame; for YUV input, use CVPixelBufferCreateWithPlanarBytes instead. GLubyte comes from <OpenGLES/gltypes.h>.

// Release callback passed to CVPixelBufferCreateWithBytes: frees the scaled bitmap once
// the pixel buffer no longer needs it (GPUImage defines an equivalent helper).
void stillImageDataReleaseCallback(void *releaseRefCon, const void *baseAddress)
{
    free((void *)baseAddress);
}

void GPUImageCreateResizedSampleBuffer(CVPixelBufferRef cameraFrame, CGSize finalSize, CMSampleBufferRef *sampleBuffer)
{
    CGSize originalSize = CGSizeMake(CVPixelBufferGetWidth(cameraFrame), CVPixelBufferGetHeight(cameraFrame));

    // Wrap the camera frame's BGRA bytes in a CGImage without copying,
    // so the buffer must stay locked until drawing is finished.
    CVPixelBufferLockBaseAddress(cameraFrame, 0);
    GLubyte *sourceImageBytes = CVPixelBufferGetBaseAddress(cameraFrame);
    CGDataProviderRef dataProvider = CGDataProviderCreateWithData(NULL, sourceImageBytes,
        CVPixelBufferGetBytesPerRow(cameraFrame) * originalSize.height, NULL);
    CGColorSpaceRef genericRGBColorspace = CGColorSpaceCreateDeviceRGB();
    CGImageRef cgImageFromBytes = CGImageCreate((int)originalSize.width, (int)originalSize.height, 8, 32,
        CVPixelBufferGetBytesPerRow(cameraFrame), genericRGBColorspace,
        kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst,
        dataProvider, NULL, NO, kCGRenderingIntentDefault);

    // Scale by drawing into a bitmap context backed by freshly allocated memory
    GLubyte *imageData = (GLubyte *)calloc(1, (int)finalSize.width * (int)finalSize.height * 4);
    CGContextRef imageContext = CGBitmapContextCreate(imageData, (int)finalSize.width, (int)finalSize.height,
        8, (int)finalSize.width * 4, genericRGBColorspace,
        kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    CGContextDrawImage(imageContext, CGRectMake(0.0, 0.0, finalSize.width, finalSize.height), cgImageFromBytes);
    CGImageRelease(cgImageFromBytes);
    CGContextRelease(imageContext);
    CGColorSpaceRelease(genericRGBColorspace);
    CGDataProviderRelease(dataProvider);
    CVPixelBufferUnlockBaseAddress(cameraFrame, 0); // missing in the original paste: unlock once reading is done

    // Hand the scaled bytes to a new pixel buffer; the release callback frees them later
    CVPixelBufferRef pixel_buffer = NULL;
    CVPixelBufferCreateWithBytes(kCFAllocatorDefault, finalSize.width, finalSize.height,
        kCVPixelFormatType_32BGRA, imageData, finalSize.width * 4,
        stillImageDataReleaseCallback, NULL, NULL, &pixel_buffer);

    // Wrap the pixel buffer in a CMSampleBuffer with nominal 30 fps timing
    CMVideoFormatDescriptionRef videoInfo = NULL;
    CMVideoFormatDescriptionCreateForImageBuffer(NULL, pixel_buffer, &videoInfo);
    CMTime frameTime = CMTimeMake(1, 30);
    CMSampleTimingInfo timing = {frameTime, frameTime, kCMTimeInvalid};
    CMSampleBufferCreateForImageBuffer(kCFAllocatorDefault, pixel_buffer, YES, NULL, NULL, videoInfo, &timing, sampleBuffer);
    CFRelease(videoInfo);
    CVPixelBufferRelease(pixel_buffer);
}

It doesn't take you all the way from a UIImage to a CMSampleBufferRef, but as weichsel points out in the same thread, you only need the CVPixelBufferRef for encoding the video.
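Neither snippet actually starts from a UIImage, which is what the Stack Overflow question asks about. Below is a minimal sketch of that direction, assuming a CGImage-backed UIImage and a non-planar 32BGRA target buffer; the helper names pixelBufferFromImage and sampleBufferFromImage are mine, not from the thread:

#import <UIKit/UIKit.h>
#import <CoreMedia/CoreMedia.h>
#import <CoreVideo/CoreVideo.h>

// Render a UIImage into a freshly created 32BGRA pixel buffer (hypothetical helper).
static CVPixelBufferRef pixelBufferFromImage(UIImage *image)
{
    CGImageRef cgImage = image.CGImage; // assumes a CGImage-backed UIImage
    size_t width = CGImageGetWidth(cgImage);
    size_t height = CGImageGetHeight(cgImage);

    NSDictionary *attrs = @{ (id)kCVPixelBufferCGImageCompatibilityKey : @YES,
                             (id)kCVPixelBufferCGBitmapContextCompatibilityKey : @YES };
    CVPixelBufferRef pixelBuffer = NULL;
    if (CVPixelBufferCreate(kCFAllocatorDefault, width, height, kCVPixelFormatType_32BGRA,
                            (__bridge CFDictionaryRef)attrs, &pixelBuffer) != kCVReturnSuccess)
    {
        return NULL;
    }

    // Draw the image into the buffer's backing memory, mirroring the contexts above
    CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(CVPixelBufferGetBaseAddress(pixelBuffer),
                                                 width, height, 8,
                                                 CVPixelBufferGetBytesPerRow(pixelBuffer), colorSpace,
                                                 kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), cgImage);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
    return pixelBuffer; // +1: caller releases with CVPixelBufferRelease()
}

// Wrap the pixel buffer in a CMSampleBuffer, same pattern as the GPUImage function above.
static CMSampleBufferRef sampleBufferFromImage(UIImage *image, CMTime frameTime)
{
    CVPixelBufferRef pixelBuffer = pixelBufferFromImage(image);
    if (!pixelBuffer) return NULL;

    CMVideoFormatDescriptionRef videoInfo = NULL;
    CMVideoFormatDescriptionCreateForImageBuffer(kCFAllocatorDefault, pixelBuffer, &videoInfo);
    CMSampleTimingInfo timing = { frameTime, frameTime, kCMTimeInvalid };
    CMSampleBufferRef sample = NULL;
    CMSampleBufferCreateForImageBuffer(kCFAllocatorDefault, pixelBuffer, YES, NULL, NULL,
                                       videoInfo, &timing, &sample);
    CFRelease(videoInfo);
    CVPixelBufferRelease(pixelBuffer);
    return sample; // +1: caller releases with CFRelease()
}

If the frames are headed for an AVAssetWriter, you can skip sampleBufferFromImage entirely and feed the CVPixelBufferRef to an AVAssetWriterInputPixelBufferAdaptor, which is the approach weichsel's remark refers to.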