[
  {
    "path": ".gitignore",
    "content": "# Xcode\n.DS_Store\nbuild/\n*.pbxuser\n!default.pbxuser\n*.mode1v3\n!default.mode1v3\n*.mode2v3\n!default.mode2v3\n*.perspectivev3\n!default.perspectivev3\n*.xcworkspace\n!default.xcworkspace\nxcuserdata\nprofile\n*.moved-aside\nDerivedData\n.idea/\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/DFUAppDelegate.h",
    "content": "//\n//  DFUAppDelegate.h\n//  DFURTSPPlayer\n//\n//  Created by Bogdan Furdui on 3/7/13.\n//  Copyright (c) 2013 Bogdan Furdui. All rights reserved.\n//\n\n#import <UIKit/UIKit.h>\n\n@class DFUViewController;\n\n@interface DFUAppDelegate : UIResponder <UIApplicationDelegate>\n\n@property (strong, nonatomic) UIWindow *window;\n\n@property (strong, nonatomic) DFUViewController *viewController;\n\n@end\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/DFUAppDelegate.m",
    "content": "//\n//  DFUAppDelegate.m\n//  DFURTSPPlayer\n//\n//  Created by Bogdan Furdui on 3/7/13.\n//  Copyright (c) 2013 Bogdan Furdui. All rights reserved.\n//\n\n#import \"DFUAppDelegate.h\"\n#import \"DFUViewController.h\"\n\n@implementation DFUAppDelegate\n\n- (void)dealloc\n{\n    [_window release];\n    [_viewController release];\n    [super dealloc];\n}\n\n- (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions\n{\n    self.window = [[[UIWindow alloc] initWithFrame:[[UIScreen mainScreen] bounds]] autorelease];\n    // Override point for customization after application launch.\n    if ([[UIDevice currentDevice] userInterfaceIdiom] == UIUserInterfaceIdiomPhone) {\n        self.viewController = [[[DFUViewController alloc] initWithNibName:@\"DFUViewController_iPhone\" bundle:nil] autorelease];\n    } else {\n        self.viewController = [[[DFUViewController alloc] initWithNibName:@\"DFUViewController_iPad\" bundle:nil] autorelease];\n    }\n    self.window.rootViewController = self.viewController;\n    [self.window makeKeyAndVisible];\n    return YES;\n}\n\n- (void)applicationWillResignActive:(UIApplication *)application\n{\n    // Sent when the application is about to move from active to inactive state. This can occur for certain types of temporary interruptions (such as an incoming phone call or SMS message) or when the user quits the application and it begins the transition to the background state.\n    // Use this method to pause ongoing tasks, disable timers, and throttle down OpenGL ES frame rates. Games should use this method to pause the game.\n}\n\n- (void)applicationDidEnterBackground:(UIApplication *)application\n{\n    // Use this method to release shared resources, save user data, invalidate timers, and store enough application state information to restore your application to its current state in case it is terminated later. 
\n    // If your application supports background execution, this method is called instead of applicationWillTerminate: when the user quits.\n}\n\n- (void)applicationWillEnterForeground:(UIApplication *)application\n{\n    // Called as part of the transition from the background to the inactive state; here you can undo many of the changes made on entering the background.\n}\n\n- (void)applicationDidBecomeActive:(UIApplication *)application\n{\n    // Restart any tasks that were paused (or not yet started) while the application was inactive. If the application was previously in the background, optionally refresh the user interface.\n}\n\n- (void)applicationWillTerminate:(UIApplication *)application\n{\n    // Called when the application is about to terminate. Save data if appropriate. See also applicationDidEnterBackground:.\n}\n\n@end\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/DFURTSPPlayer-Info.plist",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>CFBundleDevelopmentRegion</key>\n\t<string>en</string>\n\t<key>CFBundleDisplayName</key>\n\t<string>${PRODUCT_NAME}</string>\n\t<key>CFBundleExecutable</key>\n\t<string>${EXECUTABLE_NAME}</string>\n\t<key>CFBundleIdentifier</key>\n\t<string>com.clujtech.ro.${PRODUCT_NAME:rfc1034identifier}</string>\n\t<key>CFBundleInfoDictionaryVersion</key>\n\t<string>6.0</string>\n\t<key>CFBundleName</key>\n\t<string>${PRODUCT_NAME}</string>\n\t<key>CFBundlePackageType</key>\n\t<string>APPL</string>\n\t<key>CFBundleShortVersionString</key>\n\t<string>1.0</string>\n\t<key>CFBundleSignature</key>\n\t<string>????</string>\n\t<key>CFBundleVersion</key>\n\t<string>1.0</string>\n\t<key>LSRequiresIPhoneOS</key>\n\t<true/>\n\t<key>UIRequiredDeviceCapabilities</key>\n\t<array>\n\t\t<string>armv7</string>\n\t</array>\n\t<key>UISupportedInterfaceOrientations</key>\n\t<array>\n\t\t<string>UIInterfaceOrientationPortrait</string>\n\t\t<string>UIInterfaceOrientationLandscapeLeft</string>\n\t\t<string>UIInterfaceOrientationLandscapeRight</string>\n\t\t<string>UIInterfaceOrientationPortraitUpsideDown</string>\n\t</array>\n\t<key>UISupportedInterfaceOrientations~ipad</key>\n\t<array>\n\t\t<string>UIInterfaceOrientationPortrait</string>\n\t\t<string>UIInterfaceOrientationPortraitUpsideDown</string>\n\t\t<string>UIInterfaceOrientationLandscapeLeft</string>\n\t\t<string>UIInterfaceOrientationLandscapeRight</string>\n\t</array>\n</dict>\n</plist>\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/DFURTSPPlayer-Prefix.pch",
    "content": "//\n// Prefix header for all source files of the 'DFURTSPPlayer' target in the 'DFURTSPPlayer' project\n//\n\n#import <Availability.h>\n\n#ifndef __IPHONE_4_0\n#warning \"This project uses features only available in iOS SDK 4.0 and later.\"\n#endif\n\n#ifdef __OBJC__\n    #import <UIKit/UIKit.h>\n    #import <Foundation/Foundation.h>\n#endif\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/DFUViewController.h",
    "content": "//\n//  DFUViewController.h\n//  DFURTSPPlayer\n//\n//  Created by Bogdan Furdui on 3/7/13.\n//  Copyright (c) 2013 Bogdan Furdui. All rights reserved.\n//\n\n#import <UIKit/UIKit.h>\n\n@class RTSPPlayer;\n\n@interface DFUViewController : UIViewController\n{\n    IBOutlet UIImageView *imageView;\n\tIBOutlet UILabel *label;\n\tIBOutlet UIButton *playButton;\n    RTSPPlayer *video;\n\tfloat lastFrameTime;\n}\n\n@property (nonatomic, retain) IBOutlet UIImageView *imageView;\n@property (nonatomic, retain) IBOutlet UILabel *label;\n@property (nonatomic, retain) IBOutlet UIButton *playButton;\n@property (nonatomic, retain) RTSPPlayer *video;\n\n- (IBAction)playButtonAction:(id)sender;\n- (IBAction)showTime:(id)sender;\n\n@end\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/DFUViewController.m",
    "content": "//\n//  DFUViewController.m\n//  DFURTSPPlayer\n//\n//  Created by Bogdan Furdui on 3/7/13.\n//  Copyright (c) 2013 Bogdan Furdui. All rights reserved.\n//\n\n#import \"DFUViewController.h\"\n#import \"RTSPPlayer.h\"\n#import \"Utilities.h\"\n\n@interface DFUViewController ()\n@property (nonatomic, retain) NSTimer *nextFrameTimer;\n@end\n\n@implementation DFUViewController\n\n@synthesize imageView, label, playButton, video;\n@synthesize nextFrameTimer = _nextFrameTimer;\n\n- (id)initWithNibName:(NSString *)nibNameOrNil bundle:(NSBundle *)nibBundleOrNil\n{\n    if ((self = [super initWithNibName:nibNameOrNil bundle:nibBundleOrNil])) {\n        //http://www.wowza.com/_h264/BigBuckBunny_115k.mov\n        //rtsp://media1.law.harvard.edu/Media/policy_a/2012/02/02_unger.mov\n        //rtsp://streaming.parliament.act.gov.au/medium\n        \n        video = [[RTSPPlayer alloc] initWithVideo:@\"http://112.65.235.145/vlive.qqvideo.tc.qq.com/v00113mzdsr.mp4?vkey=03BDF0A68787D1B7937B386F359603E71EB7DD4C2F924DCCD1A956178BAAD4C5B958596242EB5FF8&br=72&platform=0&fmt=mp4&level=3\" usesTcp:NO];\n        video.outputWidth = 426;\n        video.outputHeight = 320;\n\n        NSLog(@\"video duration: %f\",video.duration);\n        NSLog(@\"video size: %d x %d\", video.sourceWidth, video.sourceHeight);\n    }\n    \n    return self;\n}\n\n- (void)dealloc\n{\n\t[video release];\n    video = nil;\n    \n\t[imageView release];\n    imageView = nil;\n    \n\t[label release];\n    label = nil;\n    \n\t[playButton release];\n    playButton = nil;\n\n    [super dealloc];\n}\n\n- (void)viewDidLoad\n{\n    [super viewDidLoad];\n\n    [imageView setContentMode:UIViewContentModeScaleAspectFit];\n    [self playButtonAction:nil];\n}\n\n- (void)didReceiveMemoryWarning\n{\n    [super didReceiveMemoryWarning];\n    // Dispose of any resources that can be recreated.\n}\n\n-(IBAction)playButtonAction:(id)sender {\n\t[playButton setEnabled:NO];\n\tlastFrameTime = -1;\n\t\n\t// seek to 
0.0 seconds\n\t[video seekTime:0.0];\n    \n    [_nextFrameTimer invalidate];\n\tself.nextFrameTimer = [NSTimer scheduledTimerWithTimeInterval:1.0/30\n                                                           target:self\n                                                         selector:@selector(displayNextFrame:)\n                                                         userInfo:nil\n                                                          repeats:YES];\n}\n\n- (IBAction)showTime:(id)sender\n{\n    NSLog(@\"current time: %f s\", video.currentTime);\n}\n\n#define LERP(A,B,C) ((A)*(1.0-C)+(B)*C)\n\n-(void)displayNextFrame:(NSTimer *)timer\n{\n\tNSTimeInterval startTime = [NSDate timeIntervalSinceReferenceDate];\n\tif (![video stepFrame]) {\n\t\t[timer invalidate];\n\t\t[playButton setEnabled:YES];\n        [video closeAudio];\n\t\treturn;\n\t}\n\timageView.image = video.currentImage;\n\tfloat frameTime = 1.0/([NSDate timeIntervalSinceReferenceDate]-startTime);\n\tif (lastFrameTime<0) {\n\t\tlastFrameTime = frameTime;\n\t} else {\n\t\tlastFrameTime = LERP(frameTime, lastFrameTime, 0.8);\n\t}\n\t[label setText:[NSString stringWithFormat:@\"%.0f\",lastFrameTime]];\n}\n\n@end\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegDecoder/AudioStreamer.h",
    "content": "#import <UIKit/UIKit.h>\n#import <AudioToolbox/AudioToolbox.h>\n#import <AVFoundation/AVFoundation.h>\n#import \"RTSPPlayer.h\"\n\n#define kNumAQBufs 3\n#define kAudioBufferSeconds 3\n\ntypedef enum _AUDIO_STATE {\n    AUDIO_STATE_READY           = 0,\n    AUDIO_STATE_STOP            = 1,\n    AUDIO_STATE_PLAYING         = 2,\n    AUDIO_STATE_PAUSE           = 3,\n    AUDIO_STATE_SEEKING         = 4\n} AUDIO_STATE;\n\n@interface AudioStreamer : NSObject\n{\n    NSString *playingFilePath_;\n    AudioStreamBasicDescription audioStreamBasicDesc_;\n    AudioQueueRef audioQueue_;\n    AudioQueueBufferRef audioQueueBuffer_[kNumAQBufs];\n    BOOL started_, finished_;\n    NSTimeInterval durationTime_, startedTime_;\n    NSInteger state_;\n    NSTimer *seekTimer_;\n    NSLock *decodeLock_;\n    RTSPPlayer *_streamer;\n    AVCodecContext *_audioCodecContext;\n}\n\n- (void)_startAudio;\n- (void)_stopAudio;\n- (BOOL)createAudioQueue;\n- (void)removeAudioQueue;\n- (void)audioQueueOutputCallback:(AudioQueueRef)inAQ inBuffer:(AudioQueueBufferRef)inBuffer;\n- (void)audioQueueIsRunningCallback;\n- (OSStatus)enqueueBuffer:(AudioQueueBufferRef)buffer;\n- (id)initWithStreamer:(RTSPPlayer*)streamer;\n\n- (OSStatus)startQueue;\n\n@end\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegDecoder/AudioStreamer.m",
    "content": "#import \"AudioStreamer.h\"\n#import \"RTSPPlayer.h\"\n\nvoid audioQueueOutputCallback(void *inClientData, AudioQueueRef inAQ,\n  AudioQueueBufferRef inBuffer);\nvoid audioQueueIsRunningCallback(void *inClientData, AudioQueueRef inAQ,\n  AudioQueuePropertyID inID);\n\nvoid audioQueueOutputCallback(void *inClientData, AudioQueueRef inAQ,\n  AudioQueueBufferRef inBuffer) {\n\n    AudioStreamer *audioController = (__bridge AudioStreamer*)inClientData;\n    [audioController audioQueueOutputCallback:inAQ inBuffer:inBuffer];\n}\n\nvoid audioQueueIsRunningCallback(void *inClientData, AudioQueueRef inAQ,\n  AudioQueuePropertyID inID) {\n\n    AudioStreamer *audioController = (__bridge AudioStreamer*)inClientData;\n    [audioController audioQueueIsRunningCallback];\n}\n\n@interface AudioStreamer ()\n@property (nonatomic, assign) RTSPPlayer *streamer;\n@property (nonatomic, assign) AVCodecContext *audioCodecContext;\n@end\n\n@implementation AudioStreamer\n\n@synthesize streamer = _streamer;\n@synthesize audioCodecContext = _audioCodecContext;\n\n- (id)initWithStreamer:(RTSPPlayer*)streamer {\n    if (self = [super init]) {\n        AVAudioSession *audioSession = [AVAudioSession sharedInstance];\n        [audioSession setCategory:AVAudioSessionCategoryPlayback error:nil];\n        _streamer = streamer;\n        _audioCodecContext = _streamer._audioCodecContext;\n    }\n    \n    return  self;\n}\n\n\n- (IBAction)playAudio:(UIButton*)sender\n{\n    [self _startAudio];\n}\n\n- (IBAction)pauseAudio:(UIButton*)sender\n{\n    if (started_) {\n      state_ = AUDIO_STATE_PAUSE;\n\n      AudioQueuePause(audioQueue_);\n      AudioQueueReset(audioQueue_);\n    }\n}\n\n- (void)_startAudio\n{\n    NSLog(@\"ready to start audio\");\n    if (started_) {\n        AudioQueueStart(audioQueue_, NULL);\n    } else {\n        [self createAudioQueue] ;\n        [self startQueue];\n    }\n\n    for (NSInteger i = 0; i < kNumAQBufs; ++i) {\n      [self 
enqueueBuffer:audioQueueBuffer_[i]];\n    }\n\n    state_ = AUDIO_STATE_PLAYING;\n}\n\n- (void)_stopAudio\n{\n    if (started_) {\n        AudioQueueStop(audioQueue_, YES);\n        startedTime_ = 0.0;\n        state_ = AUDIO_STATE_STOP;\n        finished_ = NO;\n    }\n}\n\n- (BOOL)createAudioQueue\n{\n    state_ = AUDIO_STATE_READY;\n    finished_ = NO;\n\n    if (decodeLock_) {\n        [decodeLock_ unlock];\n        decodeLock_ = nil;\n    }\n    \n    decodeLock_ = [[NSLock alloc] init];\n    \n    audioStreamBasicDesc_.mFormatID = -1;\n    audioStreamBasicDesc_.mSampleRate = _audioCodecContext->sample_rate;\n\n    if (audioStreamBasicDesc_.mSampleRate < 1) {\n        audioStreamBasicDesc_.mSampleRate = 32000;\n    }\n\n    audioStreamBasicDesc_.mFormatFlags = 0;\n    \n    switch (_audioCodecContext->codec_id) {\n        case CODEC_ID_MP3:\n        {\n            audioStreamBasicDesc_.mFormatID = kAudioFormatMPEGLayer3;\n            break;\n        }\n        case CODEC_ID_AAC:\n        {\n            audioStreamBasicDesc_.mFormatID = kAudioFormatMPEG4AAC;\n            audioStreamBasicDesc_.mFormatFlags = kMPEG4Object_AAC_LC;\n            audioStreamBasicDesc_.mSampleRate = _audioCodecContext->sample_rate;\n            audioStreamBasicDesc_.mChannelsPerFrame = _audioCodecContext->channels;\n            audioStreamBasicDesc_.mBitsPerChannel = 0;\n            audioStreamBasicDesc_.mFramesPerPacket =_audioCodecContext->frame_size;\n            audioStreamBasicDesc_.mBytesPerPacket = 0;\n            audioStreamBasicDesc_.mBytesPerFrame = _audioCodecContext->frame_bits;\n            audioStreamBasicDesc_.mReserved = 0;\n            NSLog(@\"audio format %s (%d) is  supported\",  _audioCodecContext->codec_descriptor->name, _audioCodecContext->codec_id);\n            \n            break;\n        }\n        case CODEC_ID_AC3:\n        {\n            audioStreamBasicDesc_.mFormatID = kAudioFormatAC3;\n            break;\n        }\n        case CODEC_ID_PCM_MULAW:\n  
      {\n            audioStreamBasicDesc_.mFormatID = kAudioFormatULaw;\n            audioStreamBasicDesc_.mSampleRate = 8000.0;\n            audioStreamBasicDesc_.mFormatFlags = 0;\n            audioStreamBasicDesc_.mFramesPerPacket = 1;\n            audioStreamBasicDesc_.mChannelsPerFrame = 1;\n            audioStreamBasicDesc_.mBitsPerChannel = 8;\n            audioStreamBasicDesc_.mBytesPerPacket = 1;\n            audioStreamBasicDesc_.mBytesPerFrame = 1;\n            NSLog(@\"found audio codec mulaw\");\n            break;\n        }\n        default:\n        {\n            NSLog(@\"Error: audio format '%s' (%d) is not supported\", _audioCodecContext->codec_descriptor->name, _audioCodecContext->codec_id);\n            audioStreamBasicDesc_.mFormatID = kAudioFormatAC3;\n            break;\n        }\n    }\n    \n//    if (audioStreamBasicDesc_.mFormatID != kAudioFormatULaw) {\n//        audioStreamBasicDesc_.mBytesPerPacket = 0;\n//        audioStreamBasicDesc_.mFramesPerPacket = _audioCodecContext->frame_size;\n//        audioStreamBasicDesc_.mBytesPerFrame = 0;\n//        audioStreamBasicDesc_.mChannelsPerFrame = _audioCodecContext->channels;\n//        audioStreamBasicDesc_.mBitsPerChannel = 0;        \n//    }\n    \n    OSStatus status = AudioQueueNewOutput(&audioStreamBasicDesc_, audioQueueOutputCallback, (__bridge void*)self, NULL, NULL, 0, &audioQueue_);\n    if (status != noErr) {\n      NSLog(@\"Could not create new output.\");\n      return NO;\n    }\n\n    status = AudioQueueAddPropertyListener(audioQueue_, kAudioQueueProperty_IsRunning, audioQueueIsRunningCallback, (__bridge void*)self);\n    if (status != noErr) {\n      NSLog(@\"Could not add property listener. 
(kAudioQueueProperty_IsRunning)\");\n      return NO;\n    }\n\n    for (NSInteger i = 0; i < kNumAQBufs; ++i) {\n      status = AudioQueueAllocateBufferWithPacketDescriptions(audioQueue_,\n                                                              audioStreamBasicDesc_.mSampleRate * kAudioBufferSeconds / 8,\n                                                              _audioCodecContext->sample_rate * kAudioBufferSeconds / (_audioCodecContext->frame_size + 1),\n                                                              &audioQueueBuffer_[i]);\n      if (status != noErr) {\n        NSLog(@\"Could not allocate buffer.\");\n        return NO;\n      }\n    }\n    \n    return YES;\n}\n\n- (void)removeAudioQueue\n{\n    [self _stopAudio];\n    started_ = NO;\n\n    for (NSInteger i = 0; i < kNumAQBufs; ++i) {\n      AudioQueueFreeBuffer(audioQueue_, audioQueueBuffer_[i]);\n    }\n    \n    AudioQueueDispose(audioQueue_, YES);\n    \n    if (decodeLock_) {\n        [decodeLock_ unlock];\n        decodeLock_ = nil;\n    }\n}\n\n\n- (void)audioQueueOutputCallback:(AudioQueueRef)inAQ inBuffer:(AudioQueueBufferRef)inBuffer\n{\n    if (state_ == AUDIO_STATE_PLAYING) {\n      [self enqueueBuffer:inBuffer];\n    }\n}\n\n- (void)audioQueueIsRunningCallback\n{\n    UInt32 isRunning;\n    UInt32 size = sizeof(isRunning);\n    OSStatus status = AudioQueueGetProperty(audioQueue_, kAudioQueueProperty_IsRunning, &isRunning, &size);\n\n    if (status == noErr && !isRunning && state_ == AUDIO_STATE_PLAYING) {\n      state_ = AUDIO_STATE_STOP;\n\n      if (finished_) {\n      }\n    }\n}\n\n- (OSStatus)enqueueBuffer:(AudioQueueBufferRef)buffer\n{\n    OSStatus status = noErr;\n    \n    if (buffer) {\n        AudioTimeStamp bufferStartTime;\n        buffer->mAudioDataByteSize = 0;\n        buffer->mPacketDescriptionCount = 0;\n        \n        if (_streamer.audioPacketQueue.count <= 0) {\n            _streamer.emptyAudioBuffer = buffer;\n            return status;\n        
}\n\n        _streamer.emptyAudioBuffer = nil;\n        \n        while (_streamer.audioPacketQueue.count && buffer->mPacketDescriptionCount < buffer->mPacketDescriptionCapacity) {\n            AVPacket *packet = [_streamer readPacket];\n            \n            if (buffer->mAudioDataBytesCapacity - buffer->mAudioDataByteSize >= packet->size) {\n                if (buffer->mPacketDescriptionCount == 0) {\n                    bufferStartTime.mSampleTime = packet->dts * _audioCodecContext->frame_size;\n                    bufferStartTime.mFlags = kAudioTimeStampSampleTimeValid;\n                }\n                \n                memcpy((uint8_t *)buffer->mAudioData + buffer->mAudioDataByteSize, packet->data, packet->size);\n                buffer->mPacketDescriptions[buffer->mPacketDescriptionCount].mStartOffset = buffer->mAudioDataByteSize;\n                buffer->mPacketDescriptions[buffer->mPacketDescriptionCount].mDataByteSize = packet->size;\n                buffer->mPacketDescriptions[buffer->mPacketDescriptionCount].mVariableFramesInPacket = _audioCodecContext->frame_size;\n                \n                buffer->mAudioDataByteSize += packet->size;\n                buffer->mPacketDescriptionCount++;\n                \n                \n                _streamer.audioPacketQueueSize -= packet->size;\n                            \n                av_free_packet(packet);\n            }\n            else {\n                break;\n            }\n        }\n        \n        [decodeLock_ lock];\n        if (buffer->mPacketDescriptionCount > 0) {\n            status = AudioQueueEnqueueBuffer(audioQueue_, buffer, 0, NULL);\n            if (status != noErr) { \n                NSLog(@\"Could not enqueue buffer.\");\n            }\n        } else {\n            AudioQueueStop(audioQueue_, NO);\n            finished_ = YES;\n        }\n        \n        [decodeLock_ unlock];\n    }\n    \n    return status;\n}\n\n- (OSStatus)startQueue\n{\n    OSStatus status = 
noErr;\n\n    if (!started_) {\n      status = AudioQueueStart(audioQueue_, NULL);\n      if (status == noErr) {\n        started_ = YES;\n      }\n      else {\n        NSLog(@\"Could not start audio queue.\");\n      }\n    }\n\n    return status;\n}\n\n@end\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegDecoder/RTSPPlayer.h",
    "content": "#import <Foundation/Foundation.h>\n#import \"avformat.h\"\n#import \"avcodec.h\"\n#import \"avio.h\"\n#import \"swscale.h\"\n#import <AudioToolbox/AudioQueue.h>\n#import <AudioToolbox/AudioToolbox.h>\n\n@interface RTSPPlayer : NSObject {\n\tAVFormatContext *pFormatCtx;\n\tAVCodecContext *pCodecCtx;\n    AVFrame *pFrame;\n    AVPacket packet;\n\tAVPicture picture;\n\tint videoStream;\n    int audioStream;\n\tstruct SwsContext *img_convert_ctx;\n\tint sourceWidth, sourceHeight;\n\tint outputWidth, outputHeight;\n\tUIImage *currentImage;\n\tdouble duration;\n    double currentTime;\n    NSLock *audioPacketQueueLock;\n    AVCodecContext *_audioCodecContext;\n    int16_t *_audioBuffer;\n    int audioPacketQueueSize;\n    NSMutableArray *audioPacketQueue;\n    AVStream *_audioStream;\n    NSUInteger _audioBufferSize;\n    BOOL _inBuffer;\n    AVPacket *_packet, _currentPacket;\n    BOOL primed;\n   \n\n}\n\n/* Last decoded picture as UIImage */\n@property (nonatomic, readonly) UIImage *currentImage;\n\n/* Size of video frame */\n@property (nonatomic, readonly) int sourceWidth, sourceHeight;\n\n/* Output image size. Set to the source size by default. */\n@property (nonatomic) int outputWidth, outputHeight;\n\n/* Length of video in seconds */\n@property (nonatomic, readonly) double duration;\n\n/* Current time of video in seconds */\n@property (nonatomic, readonly) double currentTime;\n\n@property (nonatomic, retain) NSMutableArray *audioPacketQueue;\n@property (nonatomic, assign) AVCodecContext *_audioCodecContext;\n@property (nonatomic, assign) AudioQueueBufferRef emptyAudioBuffer;\n@property (nonatomic, assign) int audioPacketQueueSize;\n@property (nonatomic, assign) AVStream *_audioStream;\n\n/* Initialize with movie at moviePath. Output dimensions are set to source dimensions. */\n-(id)initWithVideo:(NSString *)moviePath usesTcp:(BOOL)usesTcp;\n\n/* Read the next frame from the video stream. Returns false if no frame read (video over). 
*/\n-(BOOL)stepFrame;\n\n/* Seek to closest keyframe near specified time */\n-(void)seekTime:(double)seconds;\n\n-(void)closeAudio;\n\n- (AVPacket*)readPacket;\n\n@end\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegDecoder/RTSPPlayer.m",
    "content": "#import \"RTSPPlayer.h\"\n#import \"Utilities.h\"\n#import \"AudioStreamer.h\"\n\n#ifndef AVCODEC_MAX_AUDIO_FRAME_SIZE\n# define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio\n#endif\n\n@interface RTSPPlayer ()\n@property (nonatomic, retain) AudioStreamer *audioController;\n@end\n\n@interface RTSPPlayer (private)\n-(void)convertFrameToRGB;\n-(UIImage *)imageFromAVPicture:(AVPicture)pict width:(int)width height:(int)height;\n-(void)savePicture:(AVPicture)pFrame width:(int)width height:(int)height index:(int)iFrame;\n-(void)setupScaler;\n@end\n\n@implementation RTSPPlayer\n\n@synthesize audioController = _audioController;\n@synthesize audioPacketQueue,audioPacketQueueSize;\n@synthesize _audioStream,_audioCodecContext;\n@synthesize emptyAudioBuffer;\n\n@synthesize outputWidth, outputHeight;\n\n- (void)setOutputWidth:(int)newValue\n{\n\tif (outputWidth != newValue) {\n        outputWidth = newValue;\n        [self setupScaler];\n    }\n}\n\n- (void)setOutputHeight:(int)newValue\n{\n\tif (outputHeight != newValue) {\n        outputHeight = newValue;\n        [self setupScaler];\n    }\n}\n\n- (UIImage *)currentImage\n{\n\tif (!pFrame->data[0]) return nil;\n\t[self convertFrameToRGB];\n\treturn [self imageFromAVPicture:picture width:outputWidth height:outputHeight];\n}\n\n- (double)duration\n{\n\treturn (double)pFormatCtx->duration / AV_TIME_BASE;\n}\n\n- (double)currentTime\n{\n    AVRational timeBase = pFormatCtx->streams[videoStream]->time_base;\n    return packet.pts * (double)timeBase.num / timeBase.den;\n}\n\n- (int)sourceWidth\n{\n\treturn pCodecCtx->width;\n}\n\n- (int)sourceHeight\n{\n\treturn pCodecCtx->height;\n}\n\n- (id)initWithVideo:(NSString *)moviePath usesTcp:(BOOL)usesTcp\n{\n\tif (!(self=[super init])) return nil;\n \n    AVCodec         *pCodec;\n\t\t\n    // Register all formats and codecs\n    avcodec_register_all();\n    av_register_all();\n    avformat_network_init();\n    \n    // Set the RTSP Options\n    
AVDictionary *opts = 0;\n    if (usesTcp) \n        av_dict_set(&opts, \"rtsp_transport\", \"tcp\", 0);\n\n    \n    if (avformat_open_input(&pFormatCtx, [moviePath UTF8String], NULL, &opts) !=0 ) {\n        av_log(NULL, AV_LOG_ERROR, \"Couldn't open file\\n\");\n        goto initError;\n    }\n    \n    // Retrieve stream information\n    if (avformat_find_stream_info(pFormatCtx,NULL) < 0) {\n        av_log(NULL, AV_LOG_ERROR, \"Couldn't find stream information\\n\");\n        goto initError;\n    }\n    \n    // Find the first video stream\n    videoStream=-1;\n    audioStream=-1;\n\n    for (int i=0; i<pFormatCtx->nb_streams; i++) {\n        if (pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {\n            NSLog(@\"found video stream\");\n            videoStream=i;\n        }\n        \n        if (pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO) {\n            audioStream=i;\n            NSLog(@\"found audio stream\");\n        }\n    }\n    \n    if (videoStream==-1 && audioStream==-1) {\n        goto initError;\n    }\n\n    // Get a pointer to the codec context for the video stream\n    pCodecCtx = pFormatCtx->streams[videoStream]->codec;\n    \n    // Find the decoder for the video stream\n    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);\n    if (pCodec == NULL) {\n        av_log(NULL, AV_LOG_ERROR, \"Unsupported codec!\\n\");\n        goto initError;\n    }\n\t\n    // Open codec\n    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {\n        av_log(NULL, AV_LOG_ERROR, \"Cannot open video decoder\\n\");\n        goto initError;\n    }\n    \n    if (audioStream > -1 ) {\n        NSLog(@\"set up audiodecoder\");\n        [self setupAudioDecoder];\n    }\n\t\n    // Allocate video frame\n    pFrame = av_frame_alloc();\n\t\t\t\n\toutputWidth = pCodecCtx->width;\n\tself.outputHeight = pCodecCtx->height;\n\t\t\t\n\treturn self;\n\t\ninitError:\n\t[self release];\n\treturn nil;\n}\n\n\n- (void)setupScaler\n{\n\t// Release old 
picture and scaler\n\tavpicture_free(&picture);\n\tsws_freeContext(img_convert_ctx);\t\n\t\n\t// Allocate RGB picture\n\tavpicture_alloc(&picture, PIX_FMT_RGB24, outputWidth, outputHeight);\n\t\n\t// Setup scaler\n\tstatic int sws_flags =  SWS_FAST_BILINEAR;\n\timg_convert_ctx = sws_getContext(pCodecCtx->width, \n\t\t\t\t\t\t\t\t\t pCodecCtx->height,\n\t\t\t\t\t\t\t\t\t pCodecCtx->pix_fmt,\n\t\t\t\t\t\t\t\t\t outputWidth, \n\t\t\t\t\t\t\t\t\t outputHeight,\n\t\t\t\t\t\t\t\t\t PIX_FMT_RGB24,\n\t\t\t\t\t\t\t\t\t sws_flags, NULL, NULL, NULL);\n}\n\n- (void)seekTime:(double)seconds\n{\n\tAVRational timeBase = pFormatCtx->streams[videoStream]->time_base;\n\tint64_t targetFrame = (int64_t)((double)timeBase.den / timeBase.num * seconds);\n\tavformat_seek_file(pFormatCtx, videoStream, targetFrame, targetFrame, targetFrame, AVSEEK_FLAG_FRAME);\n\tavcodec_flush_buffers(pCodecCtx);\n}\n\n- (void)dealloc\n{\n\t// Free scaler\n\tsws_freeContext(img_convert_ctx);\t\n\n\t// Free RGB picture\n\tavpicture_free(&picture);\n    \n    // Free the packet that was allocated by av_read_frame\n    av_free_packet(&packet);\n\t\n    // Free the YUV frame\n    av_free(pFrame);\n\t\n    // Close the codec\n    if (pCodecCtx) avcodec_close(pCodecCtx);\n\t\n    // Close the video file\n    if (pFormatCtx) avformat_close_input(&pFormatCtx);\n\n    [_audioController _stopAudio];\n    [_audioController release];\n    _audioController = nil;\n\t\n    [audioPacketQueue release];\n    audioPacketQueue = nil;\n    \n    [audioPacketQueueLock release];\n    audioPacketQueueLock = nil;\n    \n\t[super dealloc];\n}\n\n- (BOOL)stepFrame\n{\n\t// AVPacket packet;\n    int frameFinished=0;\n\n    while (!frameFinished && av_read_frame(pFormatCtx, &packet) >=0 ) {\n        // Is this a packet from the video stream?\n        if(packet.stream_index==videoStream) {\n            // Decode video frame\n            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);\n        }\n        \n        if 
(packet.stream_index==audioStream) {\n            // NSLog(@\"audio stream\");\n            [audioPacketQueueLock lock];\n            \n            audioPacketQueueSize += packet.size;\n            [audioPacketQueue addObject:[NSMutableData dataWithBytes:&packet length:sizeof(packet)]];\n            \n            [audioPacketQueueLock unlock];\n            \n            if (!primed) {\n                primed=YES;\n                [_audioController _startAudio];\n            }\n            \n            if (emptyAudioBuffer) {\n                [_audioController enqueueBuffer:emptyAudioBuffer];\n            }\n        }\n\t}\n\tav_free_packet (&packet);\n\treturn frameFinished!=0;\n}\n\n- (void)convertFrameToRGB\n{\n    if(img_convert_ctx != NULL){\n        sws_scale(img_convert_ctx,\n                  (const uint8_t *const *)pFrame->data,\n                  pFrame->linesize,\n                  0,\n                  pCodecCtx->height,\n                  picture.data,\n                  picture.linesize);\n    }\n}\n\n- (UIImage *)imageFromAVPicture:(AVPicture)pict width:(int)width height:(int)height\n{\n\tCGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;\n\tCFDataRef data = CFDataCreateWithBytesNoCopy(kCFAllocatorDefault, pict.data[0], pict.linesize[0]*height,kCFAllocatorNull);\n\tCGDataProviderRef provider = CGDataProviderCreateWithCFData(data);\n\tCGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();\n\tCGImageRef cgImage = CGImageCreate(width, \n\t\t\t\t\t\t\t\t\t   height, \n\t\t\t\t\t\t\t\t\t   8, \n\t\t\t\t\t\t\t\t\t   24, \n\t\t\t\t\t\t\t\t\t   pict.linesize[0], \n\t\t\t\t\t\t\t\t\t   colorSpace, \n\t\t\t\t\t\t\t\t\t   bitmapInfo, \n\t\t\t\t\t\t\t\t\t   provider, \n\t\t\t\t\t\t\t\t\t   NULL, \n\t\t\t\t\t\t\t\t\t   NO, \n\t\t\t\t\t\t\t\t\t   kCGRenderingIntentDefault);\n\tCGColorSpaceRelease(colorSpace);\n\tUIImage *image = [UIImage imageWithCGImage:cgImage];\n\t\n    
CGImageRelease(cgImage);\n\tCGDataProviderRelease(provider);\n\tCFRelease(data);\n\t\n\treturn image;\n}\n\n- (void)setupAudioDecoder\n{    \n    if (audioStream >= 0) {\n        _audioBufferSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;\n        _audioBuffer = av_malloc(_audioBufferSize);\n        _inBuffer = NO;\n        \n        _audioCodecContext = pFormatCtx->streams[audioStream]->codec;\n        _audioStream = pFormatCtx->streams[audioStream];\n        \n        AVCodec *codec = avcodec_find_decoder(_audioCodecContext->codec_id);\n        if (codec == NULL) {\n            NSLog(@\"Not found audio codec.\");\n            return;\n        }\n        \n        if (avcodec_open2(_audioCodecContext, codec, NULL) < 0) {\n            NSLog(@\"Could not open audio codec.\");\n            return;\n        }\n        \n        if (audioPacketQueue) {\n            [audioPacketQueue release];\n            audioPacketQueue = nil;\n        }        \n        audioPacketQueue = [[NSMutableArray alloc] init];\n        \n        if (audioPacketQueueLock) {\n            [audioPacketQueueLock release];\n            audioPacketQueueLock = nil;\n        }\n        audioPacketQueueLock = [[NSLock alloc] init];\n        \n        if (_audioController) {\n            [_audioController _stopAudio];\n            [_audioController release];\n            _audioController = nil;\n        }\n        _audioController = [[AudioStreamer alloc] initWithStreamer:self];\n    } else {\n        pFormatCtx->streams[audioStream]->discard = AVDISCARD_ALL;\n        audioStream = -1;\n    }\n}\n\n- (void)nextPacket\n{\n    _inBuffer = NO;\n}\n\n- (AVPacket*)readPacket\n{\n    if (_currentPacket.size > 0 || _inBuffer) return &_currentPacket;\n    \n    NSMutableData *packetData = [audioPacketQueue objectAtIndex:0];\n    _packet = [packetData mutableBytes];\n    \n    if (_packet) {\n        if (_packet->dts != AV_NOPTS_VALUE) {\n            _packet->dts += av_rescale_q(0, AV_TIME_BASE_Q, 
_audioStream->time_base);\n        }\n        \n        if (_packet->pts != AV_NOPTS_VALUE) {\n            _packet->pts += av_rescale_q(0, AV_TIME_BASE_Q, _audioStream->time_base);\n        }\n        \n        [audioPacketQueueLock lock];\n        audioPacketQueueSize -= _packet->size;\n        if ([audioPacketQueue count] > 0) {\n            [audioPacketQueue removeObjectAtIndex:0];\n        }\n        [audioPacketQueueLock unlock];\n        \n        _currentPacket = *(_packet);\n    }\n    \n    return &_currentPacket;   \n}\n\n- (void)closeAudio\n{\n    [_audioController _stopAudio];\n    primed=NO;\n}\n\n- (void)savePPMPicture:(AVPicture)pict width:(int)width height:(int)height index:(int)iFrame\n{\n    FILE *pFile;\n\tNSString *fileName;\n    int  y;\n\t\n\tfileName = [Utilities documentsPath:[NSString stringWithFormat:@\"image%04d.ppm\",iFrame]];\n    // Open file\n    NSLog(@\"write image file: %@\",fileName);\n    pFile=fopen([fileName cStringUsingEncoding:NSASCIIStringEncoding], \"wb\");\n    if (pFile == NULL) {\n        return;\n    }\n\t\n    // Write header\n    fprintf(pFile, \"P6\\n%d %d\\n255\\n\", width, height);\n\t\n    // Write pixel data\n    for (y=0; y<height; y++) {\n        fwrite(pict.data[0]+y*pict.linesize[0], 1, width*3, pFile);\n    }\n\t\n    // Close file\n    fclose(pFile);\n}\n\n@end\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegDecoder/Utilities.h",
    "content": "//\n//  Utilities.h\n//  iFrameExtractor\n//\n//  Created by lajos on 1/10/10.\n//\n//  Copyright 2010 Lajos Kamocsay\n//\n//  lajos at codza dot com\n//\n//  iFrameExtractor is free software; you can redistribute it and/or\n//  modify it under the terms of the GNU Lesser General Public\n//  License as published by the Free Software Foundation; either\n//  version 2.1 of the License, or (at your option) any later version.\n// \n//  iFrameExtractor is distributed in the hope that it will be useful,\n//  but WITHOUT ANY WARRANTY; without even the implied warranty of\n//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n//  Lesser General Public License for more details.\n//\n\n#import <Foundation/Foundation.h>\n\n@interface Utilities : NSObject {\n\n}\n\n+(NSString *)bundlePath:(NSString *)fileName;\n+(NSString *)documentsPath:(NSString *)fileName;\n\n@end\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegDecoder/Utilities.m",
    "content": "//\n//  Utilities.m\n//  iFrameExtractor\n//\n//  Created by lajos on 1/10/10.\n//\n//  Copyright 2010 Lajos Kamocsay\n//\n//  lajos at codza dot com\n//\n//  iFrameExtractor is free software; you can redistribute it and/or\n//  modify it under the terms of the GNU Lesser General Public\n//  License as published by the Free Software Foundation; either\n//  version 2.1 of the License, or (at your option) any later version.\n// \n//  iFrameExtractor is distributed in the hope that it will be useful,\n//  but WITHOUT ANY WARRANTY; without even the implied warranty of\n//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n//  Lesser General Public License for more details.\n//\n\n#import \"Utilities.h\"\n\n\n@implementation Utilities\n\n+(NSString *)bundlePath:(NSString *)fileName {\n\treturn [[[NSBundle mainBundle] bundlePath] stringByAppendingPathComponent:fileName];\n}\n\n+(NSString *)documentsPath:(NSString *)fileName {\n\tNSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);\n\tNSString *documentsDirectory = [paths objectAtIndex:0];\n\treturn [documentsDirectory stringByAppendingPathComponent:fileName];\n}\n\n@end\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavcodec/avcodec.h",
    "content": "/*\n * copyright (c) 2001 Fabrice Bellard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_AVCODEC_H\n#define AVCODEC_AVCODEC_H\n\n/**\n * @file\n * @ingroup libavc\n * Libavcodec external API header\n */\n\n#include <errno.h>\n#include \"libavutil/samplefmt.h\"\n#include \"libavutil/attributes.h\"\n#include \"libavutil/avutil.h\"\n#include \"libavutil/buffer.h\"\n#include \"libavutil/cpu.h\"\n#include \"libavutil/channel_layout.h\"\n#include \"libavutil/dict.h\"\n#include \"libavutil/frame.h\"\n#include \"libavutil/log.h\"\n#include \"libavutil/pixfmt.h\"\n#include \"libavutil/rational.h\"\n\n#include \"version.h\"\n\n/**\n * @defgroup libavc Encoding/Decoding Library\n * @{\n *\n * @defgroup lavc_decoding Decoding\n * @{\n * @}\n *\n * @defgroup lavc_encoding Encoding\n * @{\n * @}\n *\n * @defgroup lavc_codec Codecs\n * @{\n * @defgroup lavc_codec_native Native Codecs\n * @{\n * @}\n * @defgroup lavc_codec_wrappers External library wrappers\n * @{\n * @}\n * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge\n * @{\n * @}\n * @}\n * @defgroup lavc_internal Internal\n * @{\n * @}\n * @}\n *\n */\n\n/**\n * @defgroup lavc_core Core functions/structures.\n * @ingroup libavc\n *\n * Basic definitions, 
functions for querying libavcodec capabilities,\n * allocating core structures, etc.\n * @{\n */\n\n\n/**\n * Identify the syntax and semantics of the bitstream.\n * The principle is roughly:\n * Two decoders with the same ID can decode the same streams.\n * Two encoders with the same ID can encode compatible streams.\n * There may be slight deviations from the principle due to implementation\n * details.\n *\n * If you add a codec ID to this list, add it so that\n * 1. no value of a existing codec ID changes (that would break ABI),\n * 2. Give it a value which when taken as ASCII is recognized uniquely by a human as this specific codec.\n *    This ensures that 2 forks can independently add AVCodecIDs without producing conflicts.\n *\n * After adding new codec IDs, do not forget to add an entry to the codec\n * descriptor list and bump libavcodec minor version.\n */\nenum AVCodecID {\n    AV_CODEC_ID_NONE,\n\n    /* video codecs */\n    AV_CODEC_ID_MPEG1VIDEO,\n    AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding\n#if FF_API_XVMC\n    AV_CODEC_ID_MPEG2VIDEO_XVMC,\n#endif /* FF_API_XVMC */\n    AV_CODEC_ID_H261,\n    AV_CODEC_ID_H263,\n    AV_CODEC_ID_RV10,\n    AV_CODEC_ID_RV20,\n    AV_CODEC_ID_MJPEG,\n    AV_CODEC_ID_MJPEGB,\n    AV_CODEC_ID_LJPEG,\n    AV_CODEC_ID_SP5X,\n    AV_CODEC_ID_JPEGLS,\n    AV_CODEC_ID_MPEG4,\n    AV_CODEC_ID_RAWVIDEO,\n    AV_CODEC_ID_MSMPEG4V1,\n    AV_CODEC_ID_MSMPEG4V2,\n    AV_CODEC_ID_MSMPEG4V3,\n    AV_CODEC_ID_WMV1,\n    AV_CODEC_ID_WMV2,\n    AV_CODEC_ID_H263P,\n    AV_CODEC_ID_H263I,\n    AV_CODEC_ID_FLV1,\n    AV_CODEC_ID_SVQ1,\n    AV_CODEC_ID_SVQ3,\n    AV_CODEC_ID_DVVIDEO,\n    AV_CODEC_ID_HUFFYUV,\n    AV_CODEC_ID_CYUV,\n    AV_CODEC_ID_H264,\n    AV_CODEC_ID_INDEO3,\n    AV_CODEC_ID_VP3,\n    AV_CODEC_ID_THEORA,\n    AV_CODEC_ID_ASV1,\n    AV_CODEC_ID_ASV2,\n    AV_CODEC_ID_FFV1,\n    AV_CODEC_ID_4XM,\n    AV_CODEC_ID_VCR1,\n    AV_CODEC_ID_CLJR,\n    AV_CODEC_ID_MDEC,\n    AV_CODEC_ID_ROQ,\n    
AV_CODEC_ID_INTERPLAY_VIDEO,\n    AV_CODEC_ID_XAN_WC3,\n    AV_CODEC_ID_XAN_WC4,\n    AV_CODEC_ID_RPZA,\n    AV_CODEC_ID_CINEPAK,\n    AV_CODEC_ID_WS_VQA,\n    AV_CODEC_ID_MSRLE,\n    AV_CODEC_ID_MSVIDEO1,\n    AV_CODEC_ID_IDCIN,\n    AV_CODEC_ID_8BPS,\n    AV_CODEC_ID_SMC,\n    AV_CODEC_ID_FLIC,\n    AV_CODEC_ID_TRUEMOTION1,\n    AV_CODEC_ID_VMDVIDEO,\n    AV_CODEC_ID_MSZH,\n    AV_CODEC_ID_ZLIB,\n    AV_CODEC_ID_QTRLE,\n    AV_CODEC_ID_TSCC,\n    AV_CODEC_ID_ULTI,\n    AV_CODEC_ID_QDRAW,\n    AV_CODEC_ID_VIXL,\n    AV_CODEC_ID_QPEG,\n    AV_CODEC_ID_PNG,\n    AV_CODEC_ID_PPM,\n    AV_CODEC_ID_PBM,\n    AV_CODEC_ID_PGM,\n    AV_CODEC_ID_PGMYUV,\n    AV_CODEC_ID_PAM,\n    AV_CODEC_ID_FFVHUFF,\n    AV_CODEC_ID_RV30,\n    AV_CODEC_ID_RV40,\n    AV_CODEC_ID_VC1,\n    AV_CODEC_ID_WMV3,\n    AV_CODEC_ID_LOCO,\n    AV_CODEC_ID_WNV1,\n    AV_CODEC_ID_AASC,\n    AV_CODEC_ID_INDEO2,\n    AV_CODEC_ID_FRAPS,\n    AV_CODEC_ID_TRUEMOTION2,\n    AV_CODEC_ID_BMP,\n    AV_CODEC_ID_CSCD,\n    AV_CODEC_ID_MMVIDEO,\n    AV_CODEC_ID_ZMBV,\n    AV_CODEC_ID_AVS,\n    AV_CODEC_ID_SMACKVIDEO,\n    AV_CODEC_ID_NUV,\n    AV_CODEC_ID_KMVC,\n    AV_CODEC_ID_FLASHSV,\n    AV_CODEC_ID_CAVS,\n    AV_CODEC_ID_JPEG2000,\n    AV_CODEC_ID_VMNC,\n    AV_CODEC_ID_VP5,\n    AV_CODEC_ID_VP6,\n    AV_CODEC_ID_VP6F,\n    AV_CODEC_ID_TARGA,\n    AV_CODEC_ID_DSICINVIDEO,\n    AV_CODEC_ID_TIERTEXSEQVIDEO,\n    AV_CODEC_ID_TIFF,\n    AV_CODEC_ID_GIF,\n    AV_CODEC_ID_DXA,\n    AV_CODEC_ID_DNXHD,\n    AV_CODEC_ID_THP,\n    AV_CODEC_ID_SGI,\n    AV_CODEC_ID_C93,\n    AV_CODEC_ID_BETHSOFTVID,\n    AV_CODEC_ID_PTX,\n    AV_CODEC_ID_TXD,\n    AV_CODEC_ID_VP6A,\n    AV_CODEC_ID_AMV,\n    AV_CODEC_ID_VB,\n    AV_CODEC_ID_PCX,\n    AV_CODEC_ID_SUNRAST,\n    AV_CODEC_ID_INDEO4,\n    AV_CODEC_ID_INDEO5,\n    AV_CODEC_ID_MIMIC,\n    AV_CODEC_ID_RL2,\n    AV_CODEC_ID_ESCAPE124,\n    AV_CODEC_ID_DIRAC,\n    AV_CODEC_ID_BFI,\n    AV_CODEC_ID_CMV,\n    AV_CODEC_ID_MOTIONPIXELS,\n    AV_CODEC_ID_TGV,\n    AV_CODEC_ID_TGQ,\n  
  AV_CODEC_ID_TQI,\n    AV_CODEC_ID_AURA,\n    AV_CODEC_ID_AURA2,\n    AV_CODEC_ID_V210X,\n    AV_CODEC_ID_TMV,\n    AV_CODEC_ID_V210,\n    AV_CODEC_ID_DPX,\n    AV_CODEC_ID_MAD,\n    AV_CODEC_ID_FRWU,\n    AV_CODEC_ID_FLASHSV2,\n    AV_CODEC_ID_CDGRAPHICS,\n    AV_CODEC_ID_R210,\n    AV_CODEC_ID_ANM,\n    AV_CODEC_ID_BINKVIDEO,\n    AV_CODEC_ID_IFF_ILBM,\n    AV_CODEC_ID_IFF_BYTERUN1,\n    AV_CODEC_ID_KGV1,\n    AV_CODEC_ID_YOP,\n    AV_CODEC_ID_VP8,\n    AV_CODEC_ID_PICTOR,\n    AV_CODEC_ID_ANSI,\n    AV_CODEC_ID_A64_MULTI,\n    AV_CODEC_ID_A64_MULTI5,\n    AV_CODEC_ID_R10K,\n    AV_CODEC_ID_MXPEG,\n    AV_CODEC_ID_LAGARITH,\n    AV_CODEC_ID_PRORES,\n    AV_CODEC_ID_JV,\n    AV_CODEC_ID_DFA,\n    AV_CODEC_ID_WMV3IMAGE,\n    AV_CODEC_ID_VC1IMAGE,\n    AV_CODEC_ID_UTVIDEO,\n    AV_CODEC_ID_BMV_VIDEO,\n    AV_CODEC_ID_VBLE,\n    AV_CODEC_ID_DXTORY,\n    AV_CODEC_ID_V410,\n    AV_CODEC_ID_XWD,\n    AV_CODEC_ID_CDXL,\n    AV_CODEC_ID_XBM,\n    AV_CODEC_ID_ZEROCODEC,\n    AV_CODEC_ID_MSS1,\n    AV_CODEC_ID_MSA1,\n    AV_CODEC_ID_TSCC2,\n    AV_CODEC_ID_MTS2,\n    AV_CODEC_ID_CLLC,\n    AV_CODEC_ID_MSS2,\n    AV_CODEC_ID_VP9,\n    AV_CODEC_ID_AIC,\n    AV_CODEC_ID_ESCAPE130_DEPRECATED,\n    AV_CODEC_ID_G2M_DEPRECATED,\n    AV_CODEC_ID_WEBP_DEPRECATED,\n    AV_CODEC_ID_HNM4_VIDEO,\n    AV_CODEC_ID_HEVC_DEPRECATED,\n    AV_CODEC_ID_FIC,\n    AV_CODEC_ID_ALIAS_PIX,\n    AV_CODEC_ID_BRENDER_PIX_DEPRECATED,\n    AV_CODEC_ID_PAF_VIDEO_DEPRECATED,\n    AV_CODEC_ID_EXR_DEPRECATED,\n    AV_CODEC_ID_VP7_DEPRECATED,\n    AV_CODEC_ID_SANM_DEPRECATED,\n    AV_CODEC_ID_SGIRLE_DEPRECATED,\n    AV_CODEC_ID_MVC1_DEPRECATED,\n    AV_CODEC_ID_MVC2_DEPRECATED,\n    AV_CODEC_ID_HQX,\n\n    AV_CODEC_ID_BRENDER_PIX= MKBETAG('B','P','I','X'),\n    AV_CODEC_ID_Y41P       = MKBETAG('Y','4','1','P'),\n    AV_CODEC_ID_ESCAPE130  = MKBETAG('E','1','3','0'),\n    AV_CODEC_ID_EXR        = MKBETAG('0','E','X','R'),\n    AV_CODEC_ID_AVRP       = MKBETAG('A','V','R','P'),\n\n    AV_CODEC_ID_012V       = 
MKBETAG('0','1','2','V'),\n    AV_CODEC_ID_G2M        = MKBETAG( 0 ,'G','2','M'),\n    AV_CODEC_ID_AVUI       = MKBETAG('A','V','U','I'),\n    AV_CODEC_ID_AYUV       = MKBETAG('A','Y','U','V'),\n    AV_CODEC_ID_TARGA_Y216 = MKBETAG('T','2','1','6'),\n    AV_CODEC_ID_V308       = MKBETAG('V','3','0','8'),\n    AV_CODEC_ID_V408       = MKBETAG('V','4','0','8'),\n    AV_CODEC_ID_YUV4       = MKBETAG('Y','U','V','4'),\n    AV_CODEC_ID_SANM       = MKBETAG('S','A','N','M'),\n    AV_CODEC_ID_PAF_VIDEO  = MKBETAG('P','A','F','V'),\n    AV_CODEC_ID_AVRN       = MKBETAG('A','V','R','n'),\n    AV_CODEC_ID_CPIA       = MKBETAG('C','P','I','A'),\n    AV_CODEC_ID_XFACE      = MKBETAG('X','F','A','C'),\n    AV_CODEC_ID_SGIRLE     = MKBETAG('S','G','I','R'),\n    AV_CODEC_ID_MVC1       = MKBETAG('M','V','C','1'),\n    AV_CODEC_ID_MVC2       = MKBETAG('M','V','C','2'),\n    AV_CODEC_ID_SNOW       = MKBETAG('S','N','O','W'),\n    AV_CODEC_ID_WEBP       = MKBETAG('W','E','B','P'),\n    AV_CODEC_ID_SMVJPEG    = MKBETAG('S','M','V','J'),\n    AV_CODEC_ID_HEVC       = MKBETAG('H','2','6','5'),\n#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC\n    AV_CODEC_ID_VP7        = MKBETAG('V','P','7','0'),\n    AV_CODEC_ID_APNG       = MKBETAG('A','P','N','G'),\n\n    /* various PCM \"codecs\" */\n    AV_CODEC_ID_FIRST_AUDIO = 0x10000,     ///< A dummy id pointing at the start of audio codecs\n    AV_CODEC_ID_PCM_S16LE = 0x10000,\n    AV_CODEC_ID_PCM_S16BE,\n    AV_CODEC_ID_PCM_U16LE,\n    AV_CODEC_ID_PCM_U16BE,\n    AV_CODEC_ID_PCM_S8,\n    AV_CODEC_ID_PCM_U8,\n    AV_CODEC_ID_PCM_MULAW,\n    AV_CODEC_ID_PCM_ALAW,\n    AV_CODEC_ID_PCM_S32LE,\n    AV_CODEC_ID_PCM_S32BE,\n    AV_CODEC_ID_PCM_U32LE,\n    AV_CODEC_ID_PCM_U32BE,\n    AV_CODEC_ID_PCM_S24LE,\n    AV_CODEC_ID_PCM_S24BE,\n    AV_CODEC_ID_PCM_U24LE,\n    AV_CODEC_ID_PCM_U24BE,\n    AV_CODEC_ID_PCM_S24DAUD,\n    AV_CODEC_ID_PCM_ZORK,\n    AV_CODEC_ID_PCM_S16LE_PLANAR,\n    AV_CODEC_ID_PCM_DVD,\n    AV_CODEC_ID_PCM_F32BE,\n    
AV_CODEC_ID_PCM_F32LE,\n    AV_CODEC_ID_PCM_F64BE,\n    AV_CODEC_ID_PCM_F64LE,\n    AV_CODEC_ID_PCM_BLURAY,\n    AV_CODEC_ID_PCM_LXF,\n    AV_CODEC_ID_S302M,\n    AV_CODEC_ID_PCM_S8_PLANAR,\n    AV_CODEC_ID_PCM_S24LE_PLANAR_DEPRECATED,\n    AV_CODEC_ID_PCM_S32LE_PLANAR_DEPRECATED,\n    AV_CODEC_ID_PCM_S24LE_PLANAR = MKBETAG(24,'P','S','P'),\n    AV_CODEC_ID_PCM_S32LE_PLANAR = MKBETAG(32,'P','S','P'),\n    AV_CODEC_ID_PCM_S16BE_PLANAR = MKBETAG('P','S','P',16),\n\n    /* various ADPCM codecs */\n    AV_CODEC_ID_ADPCM_IMA_QT = 0x11000,\n    AV_CODEC_ID_ADPCM_IMA_WAV,\n    AV_CODEC_ID_ADPCM_IMA_DK3,\n    AV_CODEC_ID_ADPCM_IMA_DK4,\n    AV_CODEC_ID_ADPCM_IMA_WS,\n    AV_CODEC_ID_ADPCM_IMA_SMJPEG,\n    AV_CODEC_ID_ADPCM_MS,\n    AV_CODEC_ID_ADPCM_4XM,\n    AV_CODEC_ID_ADPCM_XA,\n    AV_CODEC_ID_ADPCM_ADX,\n    AV_CODEC_ID_ADPCM_EA,\n    AV_CODEC_ID_ADPCM_G726,\n    AV_CODEC_ID_ADPCM_CT,\n    AV_CODEC_ID_ADPCM_SWF,\n    AV_CODEC_ID_ADPCM_YAMAHA,\n    AV_CODEC_ID_ADPCM_SBPRO_4,\n    AV_CODEC_ID_ADPCM_SBPRO_3,\n    AV_CODEC_ID_ADPCM_SBPRO_2,\n    AV_CODEC_ID_ADPCM_THP,\n    AV_CODEC_ID_ADPCM_IMA_AMV,\n    AV_CODEC_ID_ADPCM_EA_R1,\n    AV_CODEC_ID_ADPCM_EA_R3,\n    AV_CODEC_ID_ADPCM_EA_R2,\n    AV_CODEC_ID_ADPCM_IMA_EA_SEAD,\n    AV_CODEC_ID_ADPCM_IMA_EA_EACS,\n    AV_CODEC_ID_ADPCM_EA_XAS,\n    AV_CODEC_ID_ADPCM_EA_MAXIS_XA,\n    AV_CODEC_ID_ADPCM_IMA_ISS,\n    AV_CODEC_ID_ADPCM_G722,\n    AV_CODEC_ID_ADPCM_IMA_APC,\n    AV_CODEC_ID_ADPCM_VIMA_DEPRECATED,\n    AV_CODEC_ID_ADPCM_VIMA = MKBETAG('V','I','M','A'),\n#if FF_API_VIMA_DECODER\n    AV_CODEC_ID_VIMA       = MKBETAG('V','I','M','A'),\n#endif\n    AV_CODEC_ID_ADPCM_AFC  = MKBETAG('A','F','C',' '),\n    AV_CODEC_ID_ADPCM_IMA_OKI = MKBETAG('O','K','I',' '),\n    AV_CODEC_ID_ADPCM_DTK  = MKBETAG('D','T','K',' '),\n    AV_CODEC_ID_ADPCM_IMA_RAD = MKBETAG('R','A','D',' '),\n    AV_CODEC_ID_ADPCM_G726LE = MKBETAG('6','2','7','G'),\n\n    /* AMR */\n    AV_CODEC_ID_AMR_NB = 0x12000,\n    AV_CODEC_ID_AMR_WB,\n\n    /* 
RealAudio codecs*/\n    AV_CODEC_ID_RA_144 = 0x13000,\n    AV_CODEC_ID_RA_288,\n\n    /* various DPCM codecs */\n    AV_CODEC_ID_ROQ_DPCM = 0x14000,\n    AV_CODEC_ID_INTERPLAY_DPCM,\n    AV_CODEC_ID_XAN_DPCM,\n    AV_CODEC_ID_SOL_DPCM,\n\n    /* audio codecs */\n    AV_CODEC_ID_MP2 = 0x15000,\n    AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3\n    AV_CODEC_ID_AAC,\n    AV_CODEC_ID_AC3,\n    AV_CODEC_ID_DTS,\n    AV_CODEC_ID_VORBIS,\n    AV_CODEC_ID_DVAUDIO,\n    AV_CODEC_ID_WMAV1,\n    AV_CODEC_ID_WMAV2,\n    AV_CODEC_ID_MACE3,\n    AV_CODEC_ID_MACE6,\n    AV_CODEC_ID_VMDAUDIO,\n    AV_CODEC_ID_FLAC,\n    AV_CODEC_ID_MP3ADU,\n    AV_CODEC_ID_MP3ON4,\n    AV_CODEC_ID_SHORTEN,\n    AV_CODEC_ID_ALAC,\n    AV_CODEC_ID_WESTWOOD_SND1,\n    AV_CODEC_ID_GSM, ///< as in Berlin toast format\n    AV_CODEC_ID_QDM2,\n    AV_CODEC_ID_COOK,\n    AV_CODEC_ID_TRUESPEECH,\n    AV_CODEC_ID_TTA,\n    AV_CODEC_ID_SMACKAUDIO,\n    AV_CODEC_ID_QCELP,\n    AV_CODEC_ID_WAVPACK,\n    AV_CODEC_ID_DSICINAUDIO,\n    AV_CODEC_ID_IMC,\n    AV_CODEC_ID_MUSEPACK7,\n    AV_CODEC_ID_MLP,\n    AV_CODEC_ID_GSM_MS, /* as found in WAV */\n    AV_CODEC_ID_ATRAC3,\n#if FF_API_VOXWARE\n    AV_CODEC_ID_VOXWARE,\n#endif\n    AV_CODEC_ID_APE,\n    AV_CODEC_ID_NELLYMOSER,\n    AV_CODEC_ID_MUSEPACK8,\n    AV_CODEC_ID_SPEEX,\n    AV_CODEC_ID_WMAVOICE,\n    AV_CODEC_ID_WMAPRO,\n    AV_CODEC_ID_WMALOSSLESS,\n    AV_CODEC_ID_ATRAC3P,\n    AV_CODEC_ID_EAC3,\n    AV_CODEC_ID_SIPR,\n    AV_CODEC_ID_MP1,\n    AV_CODEC_ID_TWINVQ,\n    AV_CODEC_ID_TRUEHD,\n    AV_CODEC_ID_MP4ALS,\n    AV_CODEC_ID_ATRAC1,\n    AV_CODEC_ID_BINKAUDIO_RDFT,\n    AV_CODEC_ID_BINKAUDIO_DCT,\n    AV_CODEC_ID_AAC_LATM,\n    AV_CODEC_ID_QDMC,\n    AV_CODEC_ID_CELT,\n    AV_CODEC_ID_G723_1,\n    AV_CODEC_ID_G729,\n    AV_CODEC_ID_8SVX_EXP,\n    AV_CODEC_ID_8SVX_FIB,\n    AV_CODEC_ID_BMV_AUDIO,\n    AV_CODEC_ID_RALF,\n    AV_CODEC_ID_IAC,\n    AV_CODEC_ID_ILBC,\n    AV_CODEC_ID_OPUS_DEPRECATED,\n    
AV_CODEC_ID_COMFORT_NOISE,\n    AV_CODEC_ID_TAK_DEPRECATED,\n    AV_CODEC_ID_METASOUND,\n    AV_CODEC_ID_PAF_AUDIO_DEPRECATED,\n    AV_CODEC_ID_ON2AVC,\n    AV_CODEC_ID_DSS_SP,\n    AV_CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'),\n    AV_CODEC_ID_SONIC       = MKBETAG('S','O','N','C'),\n    AV_CODEC_ID_SONIC_LS    = MKBETAG('S','O','N','L'),\n    AV_CODEC_ID_PAF_AUDIO   = MKBETAG('P','A','F','A'),\n    AV_CODEC_ID_OPUS        = MKBETAG('O','P','U','S'),\n    AV_CODEC_ID_TAK         = MKBETAG('t','B','a','K'),\n    AV_CODEC_ID_EVRC        = MKBETAG('s','e','v','c'),\n    AV_CODEC_ID_SMV         = MKBETAG('s','s','m','v'),\n    AV_CODEC_ID_DSD_LSBF    = MKBETAG('D','S','D','L'),\n    AV_CODEC_ID_DSD_MSBF    = MKBETAG('D','S','D','M'),\n    AV_CODEC_ID_DSD_LSBF_PLANAR = MKBETAG('D','S','D','1'),\n    AV_CODEC_ID_DSD_MSBF_PLANAR = MKBETAG('D','S','D','8'),\n\n    /* subtitle codecs */\n    AV_CODEC_ID_FIRST_SUBTITLE = 0x17000,          ///< A dummy ID pointing at the start of subtitle codecs.\n    AV_CODEC_ID_DVD_SUBTITLE = 0x17000,\n    AV_CODEC_ID_DVB_SUBTITLE,\n    AV_CODEC_ID_TEXT,  ///< raw UTF-8 text\n    AV_CODEC_ID_XSUB,\n    AV_CODEC_ID_SSA,\n    AV_CODEC_ID_MOV_TEXT,\n    AV_CODEC_ID_HDMV_PGS_SUBTITLE,\n    AV_CODEC_ID_DVB_TELETEXT,\n    AV_CODEC_ID_SRT,\n    AV_CODEC_ID_MICRODVD   = MKBETAG('m','D','V','D'),\n    AV_CODEC_ID_EIA_608    = MKBETAG('c','6','0','8'),\n    AV_CODEC_ID_JACOSUB    = MKBETAG('J','S','U','B'),\n    AV_CODEC_ID_SAMI       = MKBETAG('S','A','M','I'),\n    AV_CODEC_ID_REALTEXT   = MKBETAG('R','T','X','T'),\n    AV_CODEC_ID_STL        = MKBETAG('S','p','T','L'),\n    AV_CODEC_ID_SUBVIEWER1 = MKBETAG('S','b','V','1'),\n    AV_CODEC_ID_SUBVIEWER  = MKBETAG('S','u','b','V'),\n    AV_CODEC_ID_SUBRIP     = MKBETAG('S','R','i','p'),\n    AV_CODEC_ID_WEBVTT     = MKBETAG('W','V','T','T'),\n    AV_CODEC_ID_MPL2       = MKBETAG('M','P','L','2'),\n    AV_CODEC_ID_VPLAYER    = MKBETAG('V','P','l','r'),\n    AV_CODEC_ID_PJS        = 
MKBETAG('P','h','J','S'),\n    AV_CODEC_ID_ASS        = MKBETAG('A','S','S',' '),  ///< ASS as defined in Matroska\n\n    /* other specific kind of codecs (generally used for attachments) */\n    AV_CODEC_ID_FIRST_UNKNOWN = 0x18000,           ///< A dummy ID pointing at the start of various fake codecs.\n    AV_CODEC_ID_TTF = 0x18000,\n    AV_CODEC_ID_BINTEXT    = MKBETAG('B','T','X','T'),\n    AV_CODEC_ID_XBIN       = MKBETAG('X','B','I','N'),\n    AV_CODEC_ID_IDF        = MKBETAG( 0 ,'I','D','F'),\n    AV_CODEC_ID_OTF        = MKBETAG( 0 ,'O','T','F'),\n    AV_CODEC_ID_SMPTE_KLV  = MKBETAG('K','L','V','A'),\n    AV_CODEC_ID_DVD_NAV    = MKBETAG('D','N','A','V'),\n    AV_CODEC_ID_TIMED_ID3  = MKBETAG('T','I','D','3'),\n    AV_CODEC_ID_BIN_DATA   = MKBETAG('D','A','T','A'),\n\n\n    AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it\n\n    AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS\n                                * stream (only used by libavformat) */\n    AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems\n                                * stream (only used by libavformat) */\n    AV_CODEC_ID_FFMETADATA = 0x21000,   ///< Dummy codec for streams containing only metadata information.\n\n#if FF_API_CODEC_ID\n#include \"old_codec_ids.h\"\n#endif\n};\n\n/**\n * This struct describes the properties of a single codec described by an\n * AVCodecID.\n * @see avcodec_descriptor_get()\n */\ntypedef struct AVCodecDescriptor {\n    enum AVCodecID     id;\n    enum AVMediaType type;\n    /**\n     * Name of the codec described by this descriptor. It is non-empty and\n     * unique for each codec descriptor. It should contain alphanumeric\n     * characters and '_' only.\n     */\n    const char      *name;\n    /**\n     * A more descriptive name for this codec. 
May be NULL.\n     */\n    const char *long_name;\n    /**\n     * Codec properties, a combination of AV_CODEC_PROP_* flags.\n     */\n    int             props;\n\n    /**\n     * MIME type(s) associated with the codec.\n     * May be NULL; if not, a NULL-terminated array of MIME types.\n     * The first item is always non-NULL and is the preferred MIME type.\n     */\n    const char *const *mime_types;\n} AVCodecDescriptor;\n\n/**\n * Codec uses only intra compression.\n * Video codecs only.\n */\n#define AV_CODEC_PROP_INTRA_ONLY    (1 << 0)\n/**\n * Codec supports lossy compression. Audio and video codecs only.\n * @note a codec may support both lossy and lossless\n * compression modes\n */\n#define AV_CODEC_PROP_LOSSY         (1 << 1)\n/**\n * Codec supports lossless compression. Audio and video codecs only.\n */\n#define AV_CODEC_PROP_LOSSLESS      (1 << 2)\n/**\n * Codec supports frame reordering. That is, the coded order (the order in which\n * the encoded packets are output by the encoders / stored / input to the\n * decoders) may be different from the presentation order of the corresponding\n * frames.\n *\n * For codecs that do not have this property set, PTS and DTS should always be\n * equal.\n */\n#define AV_CODEC_PROP_REORDER       (1 << 3)\n/**\n * Subtitle codec is bitmap based\n * Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field.\n */\n#define AV_CODEC_PROP_BITMAP_SUB    (1 << 16)\n/**\n * Subtitle codec is text based.\n * Decoded AVSubtitle data can be read from the AVSubtitleRect->ass field.\n */\n#define AV_CODEC_PROP_TEXT_SUB      (1 << 17)\n\n/**\n * @ingroup lavc_decoding\n * Required number of additionally allocated bytes at the end of the input bitstream for decoding.\n * This is mainly needed because some optimized bitstream readers read\n * 32 or 64 bit at once and could read over the end.<br>\n * Note: If the first 23 bits of the additional bytes are not 0, then damaged\n * MPEG bitstreams could cause overread and 
segfault.\n */\n#define FF_INPUT_BUFFER_PADDING_SIZE 32\n\n/**\n * @ingroup lavc_encoding\n * minimum encoding buffer size\n * Used to avoid some checks during header writing.\n */\n#define FF_MIN_BUFFER_SIZE 16384\n\n\n/**\n * @ingroup lavc_encoding\n * motion estimation type.\n */\nenum Motion_Est_ID {\n    ME_ZERO = 1,    ///< no search, that is use 0,0 vector whenever one is needed\n    ME_FULL,\n    ME_LOG,\n    ME_PHODS,\n    ME_EPZS,        ///< enhanced predictive zonal search\n    ME_X1,          ///< reserved for experiments\n    ME_HEX,         ///< hexagon based search\n    ME_UMH,         ///< uneven multi-hexagon search\n    ME_TESA,        ///< transformed exhaustive search algorithm\n    ME_ITER=50,     ///< iterative search\n};\n\n/**\n * @ingroup lavc_decoding\n */\nenum AVDiscard{\n    /* We leave some space between them for extensions (drop some\n     * keyframes for intra-only or drop just some bidir frames). */\n    AVDISCARD_NONE    =-16, ///< discard nothing\n    AVDISCARD_DEFAULT =  0, ///< discard useless packets like 0 size packets in avi\n    AVDISCARD_NONREF  =  8, ///< discard all non reference\n    AVDISCARD_BIDIR   = 16, ///< discard all bidirectional frames\n    AVDISCARD_NONINTRA= 24, ///< discard all non intra frames\n    AVDISCARD_NONKEY  = 32, ///< discard all frames except keyframes\n    AVDISCARD_ALL     = 48, ///< discard all\n};\n\nenum AVAudioServiceType {\n    AV_AUDIO_SERVICE_TYPE_MAIN              = 0,\n    AV_AUDIO_SERVICE_TYPE_EFFECTS           = 1,\n    AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2,\n    AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED  = 3,\n    AV_AUDIO_SERVICE_TYPE_DIALOGUE          = 4,\n    AV_AUDIO_SERVICE_TYPE_COMMENTARY        = 5,\n    AV_AUDIO_SERVICE_TYPE_EMERGENCY         = 6,\n    AV_AUDIO_SERVICE_TYPE_VOICE_OVER        = 7,\n    AV_AUDIO_SERVICE_TYPE_KARAOKE           = 8,\n    AV_AUDIO_SERVICE_TYPE_NB                   , ///< Not part of ABI\n};\n\n/**\n * @ingroup lavc_encoding\n */\ntypedef 
struct RcOverride{\n    int start_frame;\n    int end_frame;\n    int qscale; // If this is 0 then quality_factor will be used instead.\n    float quality_factor;\n} RcOverride;\n\n#if FF_API_MAX_BFRAMES\n/**\n * @deprecated there is no libavcodec-wide limit on the number of B-frames\n */\n#define FF_MAX_B_FRAMES 16\n#endif\n\n/* encoding support\n   These flags can be passed in AVCodecContext.flags before initialization.\n   Note: Not everything is supported yet.\n*/\n\n/**\n * Allow decoders to produce frames with data planes that are not aligned\n * to CPU requirements (e.g. due to cropping).\n */\n#define CODEC_FLAG_UNALIGNED 0x0001\n#define CODEC_FLAG_QSCALE 0x0002  ///< Use fixed qscale.\n#define CODEC_FLAG_4MV    0x0004  ///< 4 MV per MB allowed / advanced prediction for H.263.\n#define CODEC_FLAG_OUTPUT_CORRUPT 0x0008 ///< Output even those frames that might be corrupted\n#define CODEC_FLAG_QPEL   0x0010  ///< Use qpel MC.\n#if FF_API_GMC\n/**\n * @deprecated use the \"gmc\" private option of the libxvid encoder\n */\n#define CODEC_FLAG_GMC    0x0020  ///< Use GMC.\n#endif\n#if FF_API_MV0\n/**\n * @deprecated use the flag \"mv0\" in the \"mpv_flags\" private option of the\n * mpegvideo encoders\n */\n#define CODEC_FLAG_MV0    0x0040\n#endif\n#if FF_API_INPUT_PRESERVED\n/**\n * @deprecated passing reference-counted frames to the encoders replaces this\n * flag\n */\n#define CODEC_FLAG_INPUT_PRESERVED 0x0100\n#endif\n#define CODEC_FLAG_PASS1           0x0200   ///< Use internal 2pass ratecontrol in first pass mode.\n#define CODEC_FLAG_PASS2           0x0400   ///< Use internal 2pass ratecontrol in second pass mode.\n#define CODEC_FLAG_GRAY            0x2000   ///< Only decode/encode grayscale.\n#if FF_API_EMU_EDGE\n/**\n * @deprecated edges are not used/required anymore. I.e. this flag is now always\n * set.\n */\n#define CODEC_FLAG_EMU_EDGE        0x4000\n#endif\n#define CODEC_FLAG_PSNR            0x8000   ///< error[?] 
variables will be set during encoding.\n#define CODEC_FLAG_TRUNCATED       0x00010000 /** Input bitstream might be truncated at a random\n                                                  location instead of only at frame boundaries. */\n#if FF_API_NORMALIZE_AQP\n/**\n * @deprecated use the flag \"naq\" in the \"mpv_flags\" private option of the\n * mpegvideo encoders\n */\n#define CODEC_FLAG_NORMALIZE_AQP  0x00020000\n#endif\n#define CODEC_FLAG_INTERLACED_DCT 0x00040000 ///< Use interlaced DCT.\n#define CODEC_FLAG_LOW_DELAY      0x00080000 ///< Force low delay.\n#define CODEC_FLAG_GLOBAL_HEADER  0x00400000 ///< Place global headers in extradata instead of every keyframe.\n#define CODEC_FLAG_BITEXACT       0x00800000 ///< Use only bitexact stuff (except (I)DCT).\n/* Fx : Flag for h263+ extra options */\n#define CODEC_FLAG_AC_PRED        0x01000000 ///< H.263 advanced intra coding / MPEG-4 AC prediction\n#define CODEC_FLAG_LOOP_FILTER    0x00000800 ///< loop filter\n#define CODEC_FLAG_INTERLACED_ME  0x20000000 ///< interlaced motion estimation\n#define CODEC_FLAG_CLOSED_GOP     0x80000000\n#define CODEC_FLAG2_FAST          0x00000001 ///< Allow non spec compliant speedup tricks.\n#define CODEC_FLAG2_NO_OUTPUT     0x00000004 ///< Skip bitstream encoding.\n#define CODEC_FLAG2_LOCAL_HEADER  0x00000008 ///< Place global headers at every keyframe instead of in extradata.\n#define CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000 ///< timecode is in drop frame format. 
DEPRECATED!!!!\n#define CODEC_FLAG2_IGNORE_CROP   0x00010000 ///< Discard cropping information from SPS.\n\n#define CODEC_FLAG2_CHUNKS        0x00008000 ///< Input bitstream might be truncated at packet boundaries instead of only at frame boundaries.\n#define CODEC_FLAG2_SHOW_ALL      0x00400000 ///< Show all frames before the first keyframe\n#define CODEC_FLAG2_EXPORT_MVS    0x10000000 ///< Export motion vectors through frame side data\n#define CODEC_FLAG2_SKIP_MANUAL   0x20000000 ///< Do not skip samples and export skip information as frame side data\n\n/* Unsupported options :\n *              Syntax Arithmetic coding (SAC)\n *              Reference Picture Selection\n *              Independent Segment Decoding */\n/* /Fx */\n/* codec capabilities */\n\n#define CODEC_CAP_DRAW_HORIZ_BAND 0x0001 ///< Decoder can use draw_horiz_band callback.\n/**\n * Codec uses get_buffer() for allocating buffers and supports custom allocators.\n * If not set, it might not use get_buffer() at all or use operations that\n * assume the buffer was allocated by avcodec_default_get_buffer.\n */\n#define CODEC_CAP_DR1             0x0002\n#define CODEC_CAP_TRUNCATED       0x0008\n#if FF_API_XVMC\n/* Codec can export data for HW decoding. This flag indicates that\n * the codec would call get_format() with list that might contain HW accelerated\n * pixel formats (XvMC, VDPAU, VAAPI, etc). The application can pick any of them\n * including raw image format.\n * The application can use the passed context to determine bitstream version,\n * chroma format, resolution etc.\n */\n#define CODEC_CAP_HWACCEL         0x0010\n#endif /* FF_API_XVMC */\n/**\n * Encoder or decoder requires flushing with NULL input at the end in order to\n * give the complete and correct output.\n *\n * NOTE: If this flag is not set, the codec is guaranteed to never be fed\n *       with NULL data. 
The user can still send NULL data to the public encode\n *       or decode function, but libavcodec will not pass it along to the codec\n *       unless this flag is set.\n *\n * Decoders:\n * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,\n * avpkt->size=0 at the end to get the delayed data until the decoder no longer\n * returns frames.\n *\n * Encoders:\n * The encoder needs to be fed with NULL data at the end of encoding until the\n * encoder no longer returns data.\n *\n * NOTE: For encoders implementing the AVCodec.encode2() function, setting this\n *       flag also means that the encoder must set the pts and duration for\n *       each output packet. If this flag is not set, the pts and duration will\n *       be determined by libavcodec from the input frame.\n */\n#define CODEC_CAP_DELAY           0x0020\n/**\n * Codec can be fed a final frame with a smaller size.\n * This can be used to prevent truncation of the last audio samples.\n */\n#define CODEC_CAP_SMALL_LAST_FRAME 0x0040\n#if FF_API_CAP_VDPAU\n/**\n * Codec can export data for HW decoding (VDPAU).\n */\n#define CODEC_CAP_HWACCEL_VDPAU    0x0080\n#endif\n/**\n * Codec can output multiple frames per AVPacket\n * Normally demuxers return one frame at a time, demuxers which do not do so\n * are connected to a parser to split what they return into proper frames.\n * This flag is reserved to the very rare category of codecs which have a\n * bitstream that cannot be split into frames without time-consuming\n * operations like full decoding. Demuxers carrying such bitstreams thus\n * may return multiple frames in a packet. 
This has many disadvantages like\n * prohibiting stream copy in many cases thus it should only be considered\n * as a last resort.\n */\n#define CODEC_CAP_SUBFRAMES        0x0100\n/**\n * Codec is experimental and is thus avoided in favor of non experimental\n * encoders\n */\n#define CODEC_CAP_EXPERIMENTAL     0x0200\n/**\n * Codec should fill in channel configuration and samplerate instead of container\n */\n#define CODEC_CAP_CHANNEL_CONF     0x0400\n#if FF_API_NEG_LINESIZES\n/**\n * @deprecated no codecs use this capability\n */\n#define CODEC_CAP_NEG_LINESIZES    0x0800\n#endif\n/**\n * Codec supports frame-level multithreading.\n */\n#define CODEC_CAP_FRAME_THREADS    0x1000\n/**\n * Codec supports slice-based (or partition-based) multithreading.\n */\n#define CODEC_CAP_SLICE_THREADS    0x2000\n/**\n * Codec supports changed parameters at any point.\n */\n#define CODEC_CAP_PARAM_CHANGE     0x4000\n/**\n * Codec supports avctx->thread_count == 0 (auto).\n */\n#define CODEC_CAP_AUTO_THREADS     0x8000\n/**\n * Audio encoder supports receiving a different number of samples in each call.\n */\n#define CODEC_CAP_VARIABLE_FRAME_SIZE 0x10000\n/**\n * Codec is intra only.\n */\n#define CODEC_CAP_INTRA_ONLY       0x40000000\n/**\n * Codec is lossless.\n */\n#define CODEC_CAP_LOSSLESS         0x80000000\n\n#if FF_API_MB_TYPE\n//The following defines may change, don't expect compatibility if you use them.\n#define MB_TYPE_INTRA4x4   0x0001\n#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific\n#define MB_TYPE_INTRA_PCM  0x0004 //FIXME H.264-specific\n#define MB_TYPE_16x16      0x0008\n#define MB_TYPE_16x8       0x0010\n#define MB_TYPE_8x16       0x0020\n#define MB_TYPE_8x8        0x0040\n#define MB_TYPE_INTERLACED 0x0080\n#define MB_TYPE_DIRECT2    0x0100 //FIXME\n#define MB_TYPE_ACPRED     0x0200\n#define MB_TYPE_GMC        0x0400\n#define MB_TYPE_SKIP       0x0800\n#define MB_TYPE_P0L0       0x1000\n#define MB_TYPE_P1L0       0x2000\n#define MB_TYPE_P0L1       
0x4000\n#define MB_TYPE_P1L1       0x8000\n#define MB_TYPE_L0         (MB_TYPE_P0L0 | MB_TYPE_P1L0)\n#define MB_TYPE_L1         (MB_TYPE_P0L1 | MB_TYPE_P1L1)\n#define MB_TYPE_L0L1       (MB_TYPE_L0   | MB_TYPE_L1)\n#define MB_TYPE_QUANT      0x00010000\n#define MB_TYPE_CBP        0x00020000\n//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...)\n#endif\n\n/**\n * Pan Scan area.\n * This specifies the area which should be displayed.\n * Note there may be multiple such areas for one frame.\n */\ntypedef struct AVPanScan{\n    /**\n     * id\n     * - encoding: Set by user.\n     * - decoding: Set by libavcodec.\n     */\n    int id;\n\n    /**\n     * width and height in 1/16 pel\n     * - encoding: Set by user.\n     * - decoding: Set by libavcodec.\n     */\n    int width;\n    int height;\n\n    /**\n     * position of the top left corner in 1/16 pel for up to 3 fields/frames\n     * - encoding: Set by user.\n     * - decoding: Set by libavcodec.\n     */\n    int16_t position[3][2];\n}AVPanScan;\n\n#if FF_API_QSCALE_TYPE\n#define FF_QSCALE_TYPE_MPEG1 0\n#define FF_QSCALE_TYPE_MPEG2 1\n#define FF_QSCALE_TYPE_H264  2\n#define FF_QSCALE_TYPE_VP56  3\n#endif\n\n#if FF_API_GET_BUFFER\n#define FF_BUFFER_TYPE_INTERNAL 1\n#define FF_BUFFER_TYPE_USER     2 ///< direct rendering buffers (image is (de)allocated by user)\n#define FF_BUFFER_TYPE_SHARED   4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared.\n#define FF_BUFFER_TYPE_COPY     8 ///< Just a (modified) copy of some other buffer, don't deallocate anything.\n\n#define FF_BUFFER_HINTS_VALID    0x01 // Buffer hints value is meaningful (if 0 ignore).\n#define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer.\n#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.\n#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).\n#endif\n\n/**\n * The decoder will keep a reference to the frame 
and may reuse it later.\n */\n#define AV_GET_BUFFER_FLAG_REF (1 << 0)\n\n/**\n * @defgroup lavc_packet AVPacket\n *\n * Types and functions for working with AVPacket.\n * @{\n */\nenum AVPacketSideDataType {\n    AV_PKT_DATA_PALETTE,\n    AV_PKT_DATA_NEW_EXTRADATA,\n\n    /**\n     * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:\n     * @code\n     * u32le param_flags\n     * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)\n     *     s32le channel_count\n     * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)\n     *     u64le channel_layout\n     * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)\n     *     s32le sample_rate\n     * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)\n     *     s32le width\n     *     s32le height\n     * @endcode\n     */\n    AV_PKT_DATA_PARAM_CHANGE,\n\n    /**\n     * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of\n     * structures with info about macroblocks relevant to splitting the\n     * packet into smaller packets on macroblock edges (e.g. 
as for RFC 2190).\n     * That is, it does not necessarily contain info about all macroblocks,\n     * as long as the distance between macroblocks in the info is smaller\n     * than the target payload size.\n     * Each MB info structure is 12 bytes, and is laid out as follows:\n     * @code\n     * u32le bit offset from the start of the packet\n     * u8    current quantizer at the start of the macroblock\n     * u8    GOB number\n     * u16le macroblock address within the GOB\n     * u8    horizontal MV predictor\n     * u8    vertical MV predictor\n     * u8    horizontal MV predictor for block number 3\n     * u8    vertical MV predictor for block number 3\n     * @endcode\n     */\n    AV_PKT_DATA_H263_MB_INFO,\n\n    /**\n     * This side data should be associated with an audio stream and contains\n     * ReplayGain information in form of the AVReplayGain struct.\n     */\n    AV_PKT_DATA_REPLAYGAIN,\n\n    /**\n     * This side data contains a 3x3 transformation matrix describing an affine\n     * transformation that needs to be applied to the decoded video frames for\n     * correct presentation.\n     *\n     * See libavutil/display.h for a detailed description of the data.\n     */\n    AV_PKT_DATA_DISPLAYMATRIX,\n\n    /**\n     * This side data should be associated with a video stream and contains\n     * Stereoscopic 3D information in form of the AVStereo3D struct.\n     */\n    AV_PKT_DATA_STEREO3D,\n\n    /**\n     * This side data should be associated with an audio stream and corresponds\n     * to enum AVAudioServiceType.\n     */\n    AV_PKT_DATA_AUDIO_SERVICE_TYPE,\n\n    /**\n     * Recommends skipping the specified number of samples\n     * @code\n     * u32le number of samples to skip from start of this packet\n     * u32le number of samples to skip from end of this packet\n     * u8    reason for start skip\n     * u8    reason for end   skip (0=padding silence, 1=convergence)\n     * @endcode\n     */\n    AV_PKT_DATA_SKIP_SAMPLES=70,\n\n  
  /**\n     * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that\n     * the packet may contain \"dual mono\" audio specific to Japanese DTV\n     * and if it is true, recommends only the selected channel to be used.\n     * @code\n     * u8    selected channels (0=main/left, 1=sub/right, 2=both)\n     * @endcode\n     */\n    AV_PKT_DATA_JP_DUALMONO,\n\n    /**\n     * A list of zero terminated key/value strings. There is no end marker for\n     * the list, so it is required to rely on the side data size to stop.\n     */\n    AV_PKT_DATA_STRINGS_METADATA,\n\n    /**\n     * Subtitle event position\n     * @code\n     * u32le x1\n     * u32le y1\n     * u32le x2\n     * u32le y2\n     * @endcode\n     */\n    AV_PKT_DATA_SUBTITLE_POSITION,\n\n    /**\n     * Data found in BlockAdditional element of matroska container. There is\n     * no end marker for the data, so it is required to rely on the side data\n     * size to recognize the end. 8 byte id (as found in BlockAddId) followed\n     * by data.\n     */\n    AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,\n\n    /**\n     * The optional first identifier line of a WebVTT cue.\n     */\n    AV_PKT_DATA_WEBVTT_IDENTIFIER,\n\n    /**\n     * The optional settings (rendering instructions) that immediately\n     * follow the timestamp specifier of a WebVTT cue.\n     */\n    AV_PKT_DATA_WEBVTT_SETTINGS,\n\n    /**\n     * A list of zero terminated key/value strings. There is no end marker for\n     * the list, so it is required to rely on the side data size to stop. This\n     * side data includes updated metadata which appeared in the stream.\n     */\n    AV_PKT_DATA_METADATA_UPDATE,\n};\n\ntypedef struct AVPacketSideData {\n    uint8_t *data;\n    int      size;\n    enum AVPacketSideDataType type;\n} AVPacketSideData;\n\n/**\n * This structure stores compressed data. 
It is typically exported by demuxers\n * and then passed as input to decoders, or received as output from encoders and\n * then passed to muxers.\n *\n * For video, it should typically contain one compressed frame. For audio it may\n * contain several compressed frames.\n *\n * AVPacket is one of the few structs in FFmpeg, whose size is a part of public\n * ABI. Thus it may be allocated on stack and no new fields can be added to it\n * without libavcodec and libavformat major bump.\n *\n * The semantics of data ownership depends on the buf or destruct (deprecated)\n * fields. If either is set, the packet data is dynamically allocated and is\n * valid indefinitely until av_free_packet() is called (which in turn calls\n * av_buffer_unref()/the destruct callback to free the data). If neither is set,\n * the packet data is typically backed by some static buffer somewhere and is\n * only valid for a limited time (e.g. until the next read call when demuxing).\n *\n * The side data is always allocated with av_malloc() and is freed in\n * av_free_packet().\n */\ntypedef struct AVPacket {\n    /**\n     * A reference to the reference-counted buffer where the packet data is\n     * stored.\n     * May be NULL, then the packet data is not reference-counted.\n     */\n    AVBufferRef *buf;\n    /**\n     * Presentation timestamp in AVStream->time_base units; the time at which\n     * the decompressed packet will be presented to the user.\n     * Can be AV_NOPTS_VALUE if it is not stored in the file.\n     * pts MUST be larger or equal to dts as presentation cannot happen before\n     * decompression, unless one wants to view hex dumps. Some formats misuse\n     * the terms dts and pts/cts to mean something different. 
Such timestamps\n     * must be converted to true pts/dts before they are stored in AVPacket.\n     */\n    int64_t pts;\n    /**\n     * Decompression timestamp in AVStream->time_base units; the time at which\n     * the packet is decompressed.\n     * Can be AV_NOPTS_VALUE if it is not stored in the file.\n     */\n    int64_t dts;\n    uint8_t *data;\n    int   size;\n    int   stream_index;\n    /**\n     * A combination of AV_PKT_FLAG values\n     */\n    int   flags;\n    /**\n     * Additional packet data that can be provided by the container.\n     * Packet can contain several types of side information.\n     */\n    AVPacketSideData *side_data;\n    int side_data_elems;\n\n    /**\n     * Duration of this packet in AVStream->time_base units, 0 if unknown.\n     * Equals next_pts - this_pts in presentation order.\n     */\n    int   duration;\n#if FF_API_DESTRUCT_PACKET\n    attribute_deprecated\n    void  (*destruct)(struct AVPacket *);\n    attribute_deprecated\n    void  *priv;\n#endif\n    int64_t pos;                            ///< byte position in stream, -1 if unknown\n\n    /**\n     * Time difference in AVStream->time_base units from the pts of this\n     * packet to the point at which the output from the decoder has converged\n     * independent from the availability of previous frames. That is, the\n     * frames are virtually identical no matter if decoding started from\n     * the very first frame or from this keyframe.\n     * Is AV_NOPTS_VALUE if unknown.\n     * This field is not the display duration of the current packet.\n     * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY\n     * set.\n     *\n     * The purpose of this field is to allow seeking in streams that have no\n     * keyframes in the conventional sense. It corresponds to the\n     * recovery point SEI in H.264 and match_time_delta in NUT. 
It is also\n     * essential for some types of subtitle streams to ensure that all\n     * subtitles are correctly displayed after seeking.\n     */\n    int64_t convergence_duration;\n} AVPacket;\n#define AV_PKT_FLAG_KEY     0x0001 ///< The packet contains a keyframe\n#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted\n\nenum AVSideDataParamChangeFlags {\n    AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT  = 0x0001,\n    AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002,\n    AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE    = 0x0004,\n    AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS     = 0x0008,\n};\n/**\n * @}\n */\n\nstruct AVCodecInternal;\n\nenum AVFieldOrder {\n    AV_FIELD_UNKNOWN,\n    AV_FIELD_PROGRESSIVE,\n    AV_FIELD_TT,          //< Top coded_first, top displayed first\n    AV_FIELD_BB,          //< Bottom coded first, bottom displayed first\n    AV_FIELD_TB,          //< Top coded first, bottom displayed first\n    AV_FIELD_BT,          //< Bottom coded first, top displayed first\n};\n\n/**\n * main external API structure.\n * New fields can be added to the end with minor version bumps.\n * Removal, reordering and changes to existing fields require a major\n * version bump.\n * Please use AVOptions (av_opt* / av_set/get*()) to access these fields from user\n * applications.\n * sizeof(AVCodecContext) must not be used outside libav*.\n */\ntypedef struct AVCodecContext {\n    /**\n     * information on struct for av_log\n     * - set by avcodec_alloc_context3\n     */\n    const AVClass *av_class;\n    int log_level_offset;\n\n    enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */\n    const struct AVCodec  *codec;\n#if FF_API_CODEC_NAME\n    /**\n     * @deprecated this field is not used for anything in libavcodec\n     */\n    attribute_deprecated\n    char             codec_name[32];\n#endif\n    enum AVCodecID     codec_id; /* see AV_CODEC_ID_xxx */\n\n    /**\n     * fourcc (LSB first, so \"ABCD\" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').\n  
   * This is used to work around some encoder bugs.\n     * A demuxer should set this to what is stored in the field used to identify the codec.\n     * If there are multiple such fields in a container then the demuxer should choose the one\n     * which maximizes the information about the used codec.\n     * If the codec tag field in a container is larger than 32 bits then the demuxer should\n     * remap the longer ID to 32 bits with a table or other structure. Alternatively a new\n     * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated\n     * first.\n     * - encoding: Set by user, if not then the default based on codec_id will be used.\n     * - decoding: Set by user, will be converted to uppercase by libavcodec during init.\n     */\n    unsigned int codec_tag;\n\n#if FF_API_STREAM_CODEC_TAG\n    /**\n     * @deprecated this field is unused\n     */\n    attribute_deprecated\n    unsigned int stream_codec_tag;\n#endif\n\n    void *priv_data;\n\n    /**\n     * Private context used for internal data.\n     *\n     * Unlike priv_data, this is not codec-specific. It is used in general\n     * libavcodec functions.\n     */\n    struct AVCodecInternal *internal;\n\n    /**\n     * Private data of the user, can be used to carry app specific stuff.\n     * - encoding: Set by user.\n     * - decoding: Set by user.\n     */\n    void *opaque;\n\n    /**\n     * the average bitrate\n     * - encoding: Set by user; unused for constant quantizer encoding.\n     * - decoding: Set by libavcodec. 
0 or some bitrate if this info is available in the stream.\n     */\n    int bit_rate;\n\n    /**\n     * number of bits the bitstream is allowed to diverge from the reference.\n     *           the reference can be CBR (for CBR pass1) or VBR (for pass2)\n     * - encoding: Set by user; unused for constant quantizer encoding.\n     * - decoding: unused\n     */\n    int bit_rate_tolerance;\n\n    /**\n     * Global quality for codecs which cannot change it per frame.\n     * This should be proportional to MPEG-1/2/4 qscale.\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int global_quality;\n\n    /**\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int compression_level;\n#define FF_COMPRESSION_DEFAULT -1\n\n    /**\n     * CODEC_FLAG_*.\n     * - encoding: Set by user.\n     * - decoding: Set by user.\n     */\n    int flags;\n\n    /**\n     * CODEC_FLAG2_*\n     * - encoding: Set by user.\n     * - decoding: Set by user.\n     */\n    int flags2;\n\n    /**\n     * some codecs need / can use extradata like Huffman tables.\n     * mjpeg: Huffman tables\n     * rv10: additional flags\n     * mpeg4: global headers (they can be in the bitstream or here)\n     * The allocated memory should be FF_INPUT_BUFFER_PADDING_SIZE bytes larger\n     * than extradata_size to avoid problems if it is read with the bitstream reader.\n     * The bytewise contents of extradata must not depend on the architecture or CPU endianness.\n     * - encoding: Set/allocated/freed by libavcodec.\n     * - decoding: Set/allocated/freed by user.\n     */\n    uint8_t *extradata;\n    int extradata_size;\n\n    /**\n     * This is the fundamental unit of time (in seconds) in terms\n     * of which frame timestamps are represented. 
For fixed-fps content,\n     * timebase should be 1/framerate and timestamp increments should be\n     * identically 1.\n     * This often, but not always is the inverse of the frame rate or field rate\n     * for video.\n     * - encoding: MUST be set by user.\n     * - decoding: the use of this field for decoding is deprecated.\n     *             Use framerate instead.\n     */\n    AVRational time_base;\n\n    /**\n     * For some codecs, the time base is closer to the field rate than the frame rate.\n     * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration\n     * if no telecine is used ...\n     *\n     * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2.\n     */\n    int ticks_per_frame;\n\n    /**\n     * Codec delay.\n     *\n     * Encoding: Number of frames delay there will be from the encoder input to\n     *           the decoder output. (we assume the decoder matches the spec)\n     * Decoding: Number of frames delay in addition to what a standard decoder\n     *           as specified in the spec would produce.\n     *\n     * Video:\n     *   Number of frames the decoded output will be delayed relative to the\n     *   encoded input.\n     *\n     * Audio:\n     *   For encoding, this field is unused (see initial_padding).\n     *\n     *   For decoding, this is the number of samples the decoder needs to\n     *   output before the decoder's output is valid. When seeking, you should\n     *   start decoding this many samples prior to your desired seek point.\n     *\n     * - encoding: Set by libavcodec.\n     * - decoding: Set by libavcodec.\n     */\n    int delay;\n\n\n    /* video only */\n    /**\n     * picture width / height.\n     * - encoding: MUST be set by user.\n     * - decoding: May be set by the user before opening the decoder if known e.g.\n     *             from the container. Some decoders will require the dimensions\n     *             to be set by the caller. 
During decoding, the decoder may\n     *             overwrite those values as required.\n     */\n    int width, height;\n\n    /**\n     * Bitstream width / height, may be different from width/height e.g. when\n     * the decoded frame is cropped before being output or lowres is enabled.\n     * - encoding: unused\n     * - decoding: May be set by the user before opening the decoder if known\n     *             e.g. from the container. During decoding, the decoder may\n     *             overwrite those values as required.\n     */\n    int coded_width, coded_height;\n\n#if FF_API_ASPECT_EXTENDED\n#define FF_ASPECT_EXTENDED 15\n#endif\n\n    /**\n     * the number of pictures in a group of pictures, or 0 for intra_only\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int gop_size;\n\n    /**\n     * Pixel format, see AV_PIX_FMT_xxx.\n     * May be set by the demuxer if known from headers.\n     * May be overridden by the decoder if it knows better.\n     * - encoding: Set by user.\n     * - decoding: Set by user if known, overridden by libavcodec if known\n     */\n    enum AVPixelFormat pix_fmt;\n\n    /**\n     * Motion estimation algorithm used for video coding.\n     * 1 (zero), 2 (full), 3 (log), 4 (phods), 5 (epzs), 6 (x1), 7 (hex),\n     * 8 (umh), 9 (iter), 10 (tesa) [7, 8, 10 are x264 specific, 9 is snow specific]\n     * - encoding: MUST be set by user.\n     * - decoding: unused\n     */\n    int me_method;\n\n    /**\n     * If non NULL, 'draw_horiz_band' is called by the libavcodec\n     * decoder to draw a horizontal band. It improves cache usage. Not\n     * all codecs can do that. 
You must check the codec capabilities\n     * beforehand.\n     * When multithreading is used, it may be called from multiple threads\n     * at the same time; threads might draw different parts of the same AVFrame,\n     * or multiple AVFrames, and there is no guarantee that slices will be drawn\n     * in order.\n     * The function is also used by hardware acceleration APIs.\n     * It is called at least once during frame decoding to pass\n     * the data needed for hardware render.\n     * In that mode instead of pixel data, AVFrame points to\n     * a structure specific to the acceleration API. The application\n     * reads the structure and can change some fields to indicate progress\n     * or mark state.\n     * - encoding: unused\n     * - decoding: Set by user.\n     * @param height the height of the slice\n     * @param y the y position of the slice\n     * @param type 1->top field, 2->bottom field, 3->frame\n     * @param offset offset into the AVFrame.data from which the slice should be read\n     */\n    void (*draw_horiz_band)(struct AVCodecContext *s,\n                            const AVFrame *src, int offset[AV_NUM_DATA_POINTERS],\n                            int y, int type, int height);\n\n    /**\n     * callback to negotiate the pixelFormat\n     * @param fmt is the list of formats which are supported by the codec,\n     * it is terminated by -1 as 0 is a valid format, the formats are ordered by quality.\n     * The first is always the native one.\n     * @note The callback may be called again immediately if initialization for\n     * the selected (hardware-accelerated) pixel format failed.\n     * @warning Behavior is undefined if the callback returns a value not\n     * in the fmt list of formats.\n     * @return the chosen format\n     * - encoding: unused\n     * - decoding: Set by user, if not set the native format will be chosen.\n     */\n    enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt);\n\n 
   /**\n     * maximum number of B-frames between non-B-frames\n     * Note: The output will be delayed by max_b_frames+1 relative to the input.\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int max_b_frames;\n\n    /**\n     * qscale factor between IP and B-frames\n     * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset).\n     * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float b_quant_factor;\n\n    /** obsolete FIXME remove */\n    int rc_strategy;\n#define FF_RC_STRATEGY_XVID 1\n\n    int b_frame_strategy;\n\n    /**\n     * qscale offset between IP and B-frames\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float b_quant_offset;\n\n    /**\n     * Size of the frame reordering buffer in the decoder.\n     * For MPEG-2 it is 1 IPB or 0 low delay IP.\n     * - encoding: Set by libavcodec.\n     * - decoding: Set by libavcodec.\n     */\n    int has_b_frames;\n\n    /**\n     * 0-> h263 quant 1-> mpeg quant\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int mpeg_quant;\n\n    /**\n     * qscale factor between P and I-frames\n     * If > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset).\n     * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float i_quant_factor;\n\n    /**\n     * qscale offset between P and I-frames\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float i_quant_offset;\n\n    /**\n     * luminance masking (0-> disabled)\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float lumi_masking;\n\n    /**\n     * temporary complexity masking (0-> disabled)\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float temporal_cplx_masking;\n\n    /**\n 
    * spatial complexity masking (0-> disabled)\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float spatial_cplx_masking;\n\n    /**\n     * p block masking (0-> disabled)\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float p_masking;\n\n    /**\n     * darkness masking (0-> disabled)\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    float dark_masking;\n\n    /**\n     * slice count\n     * - encoding: Set by libavcodec.\n     * - decoding: Set by user (or 0).\n     */\n    int slice_count;\n    /**\n     * prediction method (needed for huffyuv)\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n     int prediction_method;\n#define FF_PRED_LEFT   0\n#define FF_PRED_PLANE  1\n#define FF_PRED_MEDIAN 2\n\n    /**\n     * slice offsets in the frame in bytes\n     * - encoding: Set/allocated by libavcodec.\n     * - decoding: Set/allocated by user (or NULL).\n     */\n    int *slice_offset;\n\n    /**\n     * sample aspect ratio (0 if unknown)\n     * That is the width of a pixel divided by the height of the pixel.\n     * Numerator and denominator must be relatively prime and smaller than 256 for some video standards.\n     * - encoding: Set by user.\n     * - decoding: Set by libavcodec.\n     */\n    AVRational sample_aspect_ratio;\n\n    /**\n     * motion estimation comparison function\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int me_cmp;\n    /**\n     * subpixel motion estimation comparison function\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int me_sub_cmp;\n    /**\n     * macroblock comparison function (not supported yet)\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int mb_cmp;\n    /**\n     * interlaced DCT comparison function\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int ildct_cmp;\n#define FF_CMP_SAD    0\n#define FF_CMP_SSE    
1\n#define FF_CMP_SATD   2\n#define FF_CMP_DCT    3\n#define FF_CMP_PSNR   4\n#define FF_CMP_BIT    5\n#define FF_CMP_RD     6\n#define FF_CMP_ZERO   7\n#define FF_CMP_VSAD   8\n#define FF_CMP_VSSE   9\n#define FF_CMP_NSSE   10\n#define FF_CMP_W53    11\n#define FF_CMP_W97    12\n#define FF_CMP_DCTMAX 13\n#define FF_CMP_DCT264 14\n#define FF_CMP_CHROMA 256\n\n    /**\n     * ME diamond size & shape\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int dia_size;\n\n    /**\n     * amount of previous MV predictors (2a+1 x 2a+1 square)\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int last_predictor_count;\n\n    /**\n     * prepass for motion estimation\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int pre_me;\n\n    /**\n     * motion estimation prepass comparison function\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int me_pre_cmp;\n\n    /**\n     * ME prepass diamond size & shape\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int pre_dia_size;\n\n    /**\n     * subpel ME quality\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int me_subpel_quality;\n\n#if FF_API_AFD\n    /**\n     * DTG active format information (additional aspect ratio\n     * information only used in DVB MPEG-2 transport streams)\n     * 0 if not set.\n     *\n     * - encoding: unused\n     * - decoding: Set by decoder.\n     * @deprecated Deprecated in favor of AVSideData\n     */\n    attribute_deprecated int dtg_active_format;\n#define FF_DTG_AFD_SAME         8\n#define FF_DTG_AFD_4_3          9\n#define FF_DTG_AFD_16_9         10\n#define FF_DTG_AFD_14_9         11\n#define FF_DTG_AFD_4_3_SP_14_9  13\n#define FF_DTG_AFD_16_9_SP_14_9 14\n#define FF_DTG_AFD_SP_4_3       15\n#endif /* FF_API_AFD */\n\n    /**\n     * maximum motion estimation search range in subpel units\n     * If 0 then no limit.\n     *\n     * - encoding: Set by 
user.\n     * - decoding: unused\n     */\n    int me_range;\n\n    /**\n     * intra quantizer bias\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int intra_quant_bias;\n#define FF_DEFAULT_QUANT_BIAS 999999\n\n    /**\n     * inter quantizer bias\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int inter_quant_bias;\n\n    /**\n     * slice flags\n     * - encoding: unused\n     * - decoding: Set by user.\n     */\n    int slice_flags;\n#define SLICE_FLAG_CODED_ORDER    0x0001 ///< draw_horiz_band() is called in coded order instead of display\n#define SLICE_FLAG_ALLOW_FIELD    0x0002 ///< allow draw_horiz_band() with field slices (MPEG2 field pics)\n#define SLICE_FLAG_ALLOW_PLANE    0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1)\n\n#if FF_API_XVMC\n    /**\n     * XVideo Motion Acceleration\n     * - encoding: forbidden\n     * - decoding: set by decoder\n     * @deprecated XvMC doesn't need it anymore.\n     */\n    attribute_deprecated int xvmc_acceleration;\n#endif /* FF_API_XVMC */\n\n    /**\n     * macroblock decision mode\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int mb_decision;\n#define FF_MB_DECISION_SIMPLE 0        ///< uses mb_cmp\n#define FF_MB_DECISION_BITS   1        ///< chooses the one which needs the fewest bits\n#define FF_MB_DECISION_RD     2        ///< rate distortion\n\n    /**\n     * custom intra quantization matrix\n     * - encoding: Set by user, can be NULL.\n     * - decoding: Set by libavcodec.\n     */\n    uint16_t *intra_matrix;\n\n    /**\n     * custom inter quantization matrix\n     * - encoding: Set by user, can be NULL.\n     * - decoding: Set by libavcodec.\n     */\n    uint16_t *inter_matrix;\n\n    /**\n     * scene change detection threshold\n     * 0 is default, larger means fewer detected scene changes.\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int scenechange_threshold;\n\n    /**\n 
    * noise reduction strength
     * - encoding: Set by user.
     * - decoding: unused
     */
    int noise_reduction;

#if FF_API_MPV_OPT
    /**
     * @deprecated this field is unused
     */
    attribute_deprecated
    int me_threshold;

    /**
     * @deprecated this field is unused
     */
    attribute_deprecated
    int mb_threshold;
#endif

    /**
     * precision of the intra DC coefficient - 8
     * - encoding: Set by user.
     * - decoding: unused
     */
    int intra_dc_precision;

    /**
     * Number of macroblock rows at the top which are skipped.
     * - encoding: unused
     * - decoding: Set by user.
     */
    int skip_top;

    /**
     * Number of macroblock rows at the bottom which are skipped.
     * - encoding: unused
     * - decoding: Set by user.
     */
    int skip_bottom;

#if FF_API_MPV_OPT
    /**
     * @deprecated use encoder private options instead
     */
    attribute_deprecated
    float border_masking;
#endif

    /**
     * minimum MB lagrange multiplier
     * - encoding: Set by user.
     * - decoding: unused
     */
    int mb_lmin;

    /**
     * maximum MB lagrange multiplier
     * - encoding: Set by user.
     * - decoding: unused
     */
    int mb_lmax;

    /**
     *
     * - encoding: Set by user.
     * - decoding: unused
     */
    int me_penalty_compensation;

    /**
     *
     * - encoding: Set by user.
     * - decoding: unused
     */
    int bidir_refine;

    /**
     *
     * - encoding: Set by user.
     * - decoding: unused
     */
    int brd_scale;

    /**
     * minimum GOP size
     * - encoding: Set by user.
     * - decoding: unused
     */
    int keyint_min;

    /**
     * number of reference frames
     * - encoding: Set by user.
     * - decoding: Set by lavc.
     */
    int refs;

    /**
     * chroma qp offset from luma
     * - encoding: Set by user.
     * - decoding: 
unused\n     */\n    int chromaoffset;\n\n#if FF_API_UNUSED_MEMBERS\n    /**\n     * Multiplied by qscale for each frame and added to scene_change_score.\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    attribute_deprecated int scenechange_factor;\n#endif\n\n    /**\n     *\n     * Note: Value depends upon the compare function used for fullpel ME.\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int mv0_threshold;\n\n    /**\n     * Adjust sensitivity of b_frame_strategy 1.\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int b_sensitivity;\n\n    /**\n     * Chromaticity coordinates of the source primaries.\n     * - encoding: Set by user\n     * - decoding: Set by libavcodec\n     */\n    enum AVColorPrimaries color_primaries;\n\n    /**\n     * Color Transfer Characteristic.\n     * - encoding: Set by user\n     * - decoding: Set by libavcodec\n     */\n    enum AVColorTransferCharacteristic color_trc;\n\n    /**\n     * YUV colorspace type.\n     * - encoding: Set by user\n     * - decoding: Set by libavcodec\n     */\n    enum AVColorSpace colorspace;\n\n    /**\n     * MPEG vs JPEG YUV range.\n     * - encoding: Set by user\n     * - decoding: Set by libavcodec\n     */\n    enum AVColorRange color_range;\n\n    /**\n     * This defines the location of chroma samples.\n     * - encoding: Set by user\n     * - decoding: Set by libavcodec\n     */\n    enum AVChromaLocation chroma_sample_location;\n\n    /**\n     * Number of slices.\n     * Indicates number of picture subdivisions. 
Used for parallelized\n     * decoding.\n     * - encoding: Set by user\n     * - decoding: unused\n     */\n    int slices;\n\n    /** Field order\n     * - encoding: set by libavcodec\n     * - decoding: Set by user.\n     */\n    enum AVFieldOrder field_order;\n\n    /* audio only */\n    int sample_rate; ///< samples per second\n    int channels;    ///< number of audio channels\n\n    /**\n     * audio sample format\n     * - encoding: Set by user.\n     * - decoding: Set by libavcodec.\n     */\n    enum AVSampleFormat sample_fmt;  ///< sample format\n\n    /* The following data should not be initialized. */\n    /**\n     * Number of samples per channel in an audio frame.\n     *\n     * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame\n     *   except the last must contain exactly frame_size samples per channel.\n     *   May be 0 when the codec has CODEC_CAP_VARIABLE_FRAME_SIZE set, then the\n     *   frame size is not restricted.\n     * - decoding: may be set by some decoders to indicate constant frame size\n     */\n    int frame_size;\n\n    /**\n     * Frame counter, set by libavcodec.\n     *\n     * - decoding: total number of frames returned from the decoder so far.\n     * - encoding: total number of frames passed to the encoder so far.\n     *\n     *   @note the counter is not incremented if encoding/decoding resulted in\n     *   an error.\n     */\n    int frame_number;\n\n    /**\n     * number of bytes per packet if constant and known or 0\n     * Used by some WAV based audio codecs.\n     */\n    int block_align;\n\n    /**\n     * Audio cutoff bandwidth (0 means \"automatic\")\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int cutoff;\n\n#if FF_API_REQUEST_CHANNELS\n    /**\n     * Decoder should decode to this many channels if it can (0 for default)\n     * - encoding: unused\n     * - decoding: Set by user.\n     * @deprecated Deprecated in favor of request_channel_layout.\n     */\n    
attribute_deprecated int request_channels;\n#endif\n\n    /**\n     * Audio channel layout.\n     * - encoding: set by user.\n     * - decoding: set by user, may be overwritten by libavcodec.\n     */\n    uint64_t channel_layout;\n\n    /**\n     * Request decoder to use this channel layout if it can (0 for default)\n     * - encoding: unused\n     * - decoding: Set by user.\n     */\n    uint64_t request_channel_layout;\n\n    /**\n     * Type of service that the audio stream conveys.\n     * - encoding: Set by user.\n     * - decoding: Set by libavcodec.\n     */\n    enum AVAudioServiceType audio_service_type;\n\n    /**\n     * desired sample format\n     * - encoding: Not used.\n     * - decoding: Set by user.\n     * Decoder will decode to this format if it can.\n     */\n    enum AVSampleFormat request_sample_fmt;\n\n#if FF_API_GET_BUFFER\n    /**\n     * Called at the beginning of each frame to get a buffer for it.\n     *\n     * The function will set AVFrame.data[], AVFrame.linesize[].\n     * AVFrame.extended_data[] must also be set, but it should be the same as\n     * AVFrame.data[] except for planar audio with more channels than can fit\n     * in AVFrame.data[]. In that case, AVFrame.data[] shall still contain as\n     * many data pointers as it can hold.\n     *\n     * if CODEC_CAP_DR1 is not set then get_buffer() must call\n     * avcodec_default_get_buffer() instead of providing buffers allocated by\n     * some other means.\n     *\n     * AVFrame.data[] should be 32- or 16-byte-aligned unless the CPU doesn't\n     * need it. 
avcodec_default_get_buffer() aligns the output buffer properly,\n     * but if get_buffer() is overridden then alignment considerations should\n     * be taken into account.\n     *\n     * @see avcodec_default_get_buffer()\n     *\n     * Video:\n     *\n     * If pic.reference is set then the frame will be read later by libavcodec.\n     * avcodec_align_dimensions2() should be used to find the required width and\n     * height, as they normally need to be rounded up to the next multiple of 16.\n     *\n     * If frame multithreading is used and thread_safe_callbacks is set,\n     * it may be called from a different thread, but not from more than one at\n     * once. Does not need to be reentrant.\n     *\n     * @see release_buffer(), reget_buffer()\n     * @see avcodec_align_dimensions2()\n     *\n     * Audio:\n     *\n     * Decoders request a buffer of a particular size by setting\n     * AVFrame.nb_samples prior to calling get_buffer(). The decoder may,\n     * however, utilize only part of the buffer by setting AVFrame.nb_samples\n     * to a smaller value in the output frame.\n     *\n     * Decoders cannot use the buffer after returning from\n     * avcodec_decode_audio4(), so they will not call release_buffer(), as it\n     * is assumed to be released immediately upon return. In some rare cases,\n     * a decoder may need to call get_buffer() more than once in a single\n     * call to avcodec_decode_audio4(). In that case, when get_buffer() is\n     * called again after it has already been called once, the previously\n     * acquired buffer is assumed to be released at that time and may not be\n     * reused by the decoder.\n     *\n     * As a convenience, av_samples_get_buffer_size() and\n     * av_samples_fill_arrays() in libavutil may be used by custom get_buffer()\n     * functions to find the required data size and to fill data pointers and\n     * linesize. 
In AVFrame.linesize, only linesize[0] may be set for audio\n     * since all planes must be the same size.\n     *\n     * @see av_samples_get_buffer_size(), av_samples_fill_arrays()\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec, user can override.\n     *\n     * @deprecated use get_buffer2()\n     */\n    attribute_deprecated\n    int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic);\n\n    /**\n     * Called to release buffers which were allocated with get_buffer.\n     * A released buffer can be reused in get_buffer().\n     * pic.data[*] must be set to NULL.\n     * May be called from a different thread if frame multithreading is used,\n     * but not by more than one thread at once, so does not need to be reentrant.\n     * - encoding: unused\n     * - decoding: Set by libavcodec, user can override.\n     *\n     * @deprecated custom freeing callbacks should be set from get_buffer2()\n     */\n    attribute_deprecated\n    void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic);\n\n    /**\n     * Called at the beginning of a frame to get cr buffer for it.\n     * Buffer type (size, hints) must be the same. libavcodec won't check it.\n     * libavcodec will pass previous buffer in pic, function should return\n     * same buffer or new buffer with old frame \"painted\" into it.\n     * If pic.data[0] == NULL must behave like get_buffer().\n     * if CODEC_CAP_DR1 is not set then reget_buffer() must call\n     * avcodec_default_reget_buffer() instead of providing buffers allocated by\n     * some other means.\n     * - encoding: unused\n     * - decoding: Set by libavcodec, user can override.\n     */\n    attribute_deprecated\n    int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic);\n#endif\n\n    /**\n     * This callback is called at the beginning of each frame to get data\n     * buffer(s) for it. 
There may be one contiguous buffer for all the data or
     * there may be a buffer per each data plane or anything in between. What
     * this means is, you may set however many entries in buf[] you feel necessary.
     * Each buffer must be reference-counted using the AVBuffer API (see description
     * of buf[] below).
     *
     * The following fields will be set in the frame before this callback is
     * called:
     * - format
     * - width, height (video only)
     * - sample_rate, channel_layout, nb_samples (audio only)
     * Their values may differ from the corresponding values in
     * AVCodecContext. This callback must use the frame values, not the codec
     * context values, to calculate the required buffer size.
     *
     * This callback must fill the following fields in the frame:
     * - data[]
     * - linesize[]
     * - extended_data:
     *   * if the data is planar audio with more than 8 channels, then this
     *     callback must allocate and fill extended_data to contain all pointers
     *     to all data planes. data[] must hold as many pointers as it can.
     *     extended_data must be allocated with av_malloc() and will be freed in
     *     av_frame_unref().
     *   * otherwise extended_data must point to data
     * - buf[] must contain one or more pointers to AVBufferRef structures. Each of
     *   the frame's data and extended_data pointers must be contained in these. That
     *   is, one AVBufferRef for each allocated chunk of memory, not necessarily one
     *   AVBufferRef per data[] entry. See: av_buffer_create(), av_buffer_alloc(),
     *   and av_buffer_ref().
     * - extended_buf and nb_extended_buf must be allocated with av_malloc() by
     *   this callback and filled with the extra buffers if there are more
     *   buffers than buf[] can hold. 
extended_buf will be freed in\n     *   av_frame_unref().\n     *\n     * If CODEC_CAP_DR1 is not set then get_buffer2() must call\n     * avcodec_default_get_buffer2() instead of providing buffers allocated by\n     * some other means.\n     *\n     * Each data plane must be aligned to the maximum required by the target\n     * CPU.\n     *\n     * @see avcodec_default_get_buffer2()\n     *\n     * Video:\n     *\n     * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused\n     * (read and/or written to if it is writable) later by libavcodec.\n     *\n     * avcodec_align_dimensions2() should be used to find the required width and\n     * height, as they normally need to be rounded up to the next multiple of 16.\n     *\n     * Some decoders do not support linesizes changing between frames.\n     *\n     * If frame multithreading is used and thread_safe_callbacks is set,\n     * this callback may be called from a different thread, but not from more\n     * than one at once. Does not need to be reentrant.\n     *\n     * @see avcodec_align_dimensions2()\n     *\n     * Audio:\n     *\n     * Decoders request a buffer of a particular size by setting\n     * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may,\n     * however, utilize only part of the buffer by setting AVFrame.nb_samples\n     * to a smaller value in the output frame.\n     *\n     * As a convenience, av_samples_get_buffer_size() and\n     * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2()\n     * functions to find the required data size and to fill data pointers and\n     * linesize. 
In AVFrame.linesize, only linesize[0] may be set for audio\n     * since all planes must be the same size.\n     *\n     * @see av_samples_get_buffer_size(), av_samples_fill_arrays()\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec, user can override.\n     */\n    int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags);\n\n    /**\n     * If non-zero, the decoded audio and video frames returned from\n     * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted\n     * and are valid indefinitely. The caller must free them with\n     * av_frame_unref() when they are not needed anymore.\n     * Otherwise, the decoded frames must not be freed by the caller and are\n     * only valid until the next decode call.\n     *\n     * - encoding: unused\n     * - decoding: set by the caller before avcodec_open2().\n     */\n    int refcounted_frames;\n\n    /* - encoding parameters */\n    float qcompress;  ///< amount of qscale change between easy & hard scenes (0.0-1.0)\n    float qblur;      ///< amount of qscale smoothing over time (0.0-1.0)\n\n    /**\n     * minimum quantizer\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int qmin;\n\n    /**\n     * maximum quantizer\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int qmax;\n\n    /**\n     * maximum quantizer difference between frames\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int max_qdiff;\n\n#if FF_API_MPV_OPT\n    /**\n     * @deprecated use encoder private options instead\n     */\n    attribute_deprecated\n    float rc_qsquish;\n\n    attribute_deprecated\n    float rc_qmod_amp;\n    attribute_deprecated\n    int rc_qmod_freq;\n#endif\n\n    /**\n     * decoder bitstream buffer size\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int rc_buffer_size;\n\n    /**\n     * ratecontrol override, see RcOverride\n     * - encoding: Allocated/set/freed by 
user.\n     * - decoding: unused\n     */\n    int rc_override_count;\n    RcOverride *rc_override;\n\n#if FF_API_MPV_OPT\n    /**\n     * @deprecated use encoder private options instead\n     */\n    attribute_deprecated\n    const char *rc_eq;\n#endif\n\n    /**\n     * maximum bitrate\n     * - encoding: Set by user.\n     * - decoding: Set by libavcodec.\n     */\n    int rc_max_rate;\n\n    /**\n     * minimum bitrate\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int rc_min_rate;\n\n#if FF_API_MPV_OPT\n    /**\n     * @deprecated use encoder private options instead\n     */\n    attribute_deprecated\n    float rc_buffer_aggressivity;\n\n    attribute_deprecated\n    float rc_initial_cplx;\n#endif\n\n    /**\n     * Ratecontrol attempt to use, at maximum, <value> of what can be used without an underflow.\n     * - encoding: Set by user.\n     * - decoding: unused.\n     */\n    float rc_max_available_vbv_use;\n\n    /**\n     * Ratecontrol attempt to use, at least, <value> times the amount needed to prevent a vbv overflow.\n     * - encoding: Set by user.\n     * - decoding: unused.\n     */\n    float rc_min_vbv_overflow_use;\n\n    /**\n     * Number of bits which should be loaded into the rc buffer before decoding starts.\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int rc_initial_buffer_occupancy;\n\n#define FF_CODER_TYPE_VLC       0\n#define FF_CODER_TYPE_AC        1\n#define FF_CODER_TYPE_RAW       2\n#define FF_CODER_TYPE_RLE       3\n#if FF_API_UNUSED_MEMBERS\n#define FF_CODER_TYPE_DEFLATE   4\n#endif /* FF_API_UNUSED_MEMBERS */\n    /**\n     * coder type\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int coder_type;\n\n    /**\n     * context model\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int context_model;\n\n#if FF_API_MPV_OPT\n    /**\n     * @deprecated use encoder private options instead\n     */\n    attribute_deprecated\n    int 
lmin;\n\n    /**\n     * @deprecated use encoder private options instead\n     */\n    attribute_deprecated\n    int lmax;\n#endif\n\n    /**\n     * frame skip threshold\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int frame_skip_threshold;\n\n    /**\n     * frame skip factor\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int frame_skip_factor;\n\n    /**\n     * frame skip exponent\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int frame_skip_exp;\n\n    /**\n     * frame skip comparison function\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int frame_skip_cmp;\n\n    /**\n     * trellis RD quantization\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int trellis;\n\n    /**\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int min_prediction_order;\n\n    /**\n     * - encoding: Set by user.\n     * - decoding: unused\n     */\n    int max_prediction_order;\n\n    /**\n     * GOP timecode frame start number\n     * - encoding: Set by user, in non drop frame format\n     * - decoding: Set by libavcodec (timecode in the 25 bits format, -1 if unset)\n     */\n    int64_t timecode_frame_start;\n\n    /* The RTP callback: This function is called    */\n    /* every time the encoder has a packet to send. */\n    /* It depends on the encoder if the data starts */\n    /* with a Start Code (it should). H.263 does.   */\n    /* mb_nb contains the number of macroblocks     */\n    /* encoded in the RTP payload.                  
*/\n    void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb);\n\n    int rtp_payload_size;   /* The size of the RTP payload: the coder will  */\n                            /* do its best to deliver a chunk with size     */\n                            /* below rtp_payload_size, the chunk will start */\n                            /* with a start code on some codecs like H.263. */\n                            /* This doesn't take account of any particular  */\n                            /* headers inside the transmitted RTP payload.  */\n\n    /* statistics, used for 2-pass encoding */\n    int mv_bits;\n    int header_bits;\n    int i_tex_bits;\n    int p_tex_bits;\n    int i_count;\n    int p_count;\n    int skip_count;\n    int misc_bits;\n\n    /**\n     * number of bits used for the previously encoded frame\n     * - encoding: Set by libavcodec.\n     * - decoding: unused\n     */\n    int frame_bits;\n\n    /**\n     * pass1 encoding statistics output buffer\n     * - encoding: Set by libavcodec.\n     * - decoding: unused\n     */\n    char *stats_out;\n\n    /**\n     * pass2 encoding statistics input buffer\n     * Concatenated stuff from stats_out of pass1 should be placed here.\n     * - encoding: Allocated/set/freed by user.\n     * - decoding: unused\n     */\n    char *stats_in;\n\n    /**\n     * Work around bugs in encoders which sometimes cannot be detected automatically.\n     * - encoding: Set by user\n     * - decoding: Set by user\n     */\n    int workaround_bugs;\n#define FF_BUG_AUTODETECT       1  ///< autodetection\n#if FF_API_OLD_MSMPEG4\n#define FF_BUG_OLD_MSMPEG4      2\n#endif\n#define FF_BUG_XVID_ILACE       4\n#define FF_BUG_UMP4             8\n#define FF_BUG_NO_PADDING       16\n#define FF_BUG_AMV              32\n#if FF_API_AC_VLC\n#define FF_BUG_AC_VLC           0  ///< Will be removed, libavcodec can now handle these non-compliant files by default.\n#endif\n#define FF_BUG_QPEL_CHROMA      
64\n#define FF_BUG_STD_QPEL         128\n#define FF_BUG_QPEL_CHROMA2     256\n#define FF_BUG_DIRECT_BLOCKSIZE 512\n#define FF_BUG_EDGE             1024\n#define FF_BUG_HPEL_CHROMA      2048\n#define FF_BUG_DC_CLIP          4096\n#define FF_BUG_MS               8192 ///< Work around various bugs in Microsoft's broken decoders.\n#define FF_BUG_TRUNCATED       16384\n\n    /**\n     * strictly follow the standard (MPEG4, ...).\n     * - encoding: Set by user.\n     * - decoding: Set by user.\n     * Setting this to STRICT or higher means the encoder and decoder will\n     * generally do stupid things, whereas setting it to unofficial or lower\n     * will mean the encoder might produce output that is not supported by all\n     * spec-compliant decoders. Decoders don't differentiate between normal,\n     * unofficial and experimental (that is, they always try to decode things\n     * when they can) unless they are explicitly asked to behave stupidly\n     * (=strictly conform to the specs)\n     */\n    int strict_std_compliance;\n#define FF_COMPLIANCE_VERY_STRICT   2 ///< Strictly conform to an older more strict version of the spec or reference software.\n#define FF_COMPLIANCE_STRICT        1 ///< Strictly conform to all the things in the spec no matter what consequences.\n#define FF_COMPLIANCE_NORMAL        0\n#define FF_COMPLIANCE_UNOFFICIAL   -1 ///< Allow unofficial extensions\n#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things.\n\n    /**\n     * error concealment flags\n     * - encoding: unused\n     * - decoding: Set by user.\n     */\n    int error_concealment;\n#define FF_EC_GUESS_MVS   1\n#define FF_EC_DEBLOCK     2\n#define FF_EC_FAVOR_INTER 256\n\n    /**\n     * debug\n     * - encoding: Set by user.\n     * - decoding: Set by user.\n     */\n    int debug;\n#define FF_DEBUG_PICT_INFO   1\n#define FF_DEBUG_RC          2\n#define FF_DEBUG_BITSTREAM   4\n#define FF_DEBUG_MB_TYPE     8\n#define FF_DEBUG_QP          16\n#if 
FF_API_DEBUG_MV
/**
 * @deprecated this option does nothing
 */
#define FF_DEBUG_MV          32
#endif
#define FF_DEBUG_DCT_COEFF   0x00000040
#define FF_DEBUG_SKIP        0x00000080
#define FF_DEBUG_STARTCODE   0x00000100
#if FF_API_UNUSED_MEMBERS
#define FF_DEBUG_PTS         0x00000200
#endif /* FF_API_UNUSED_MEMBERS */
#define FF_DEBUG_ER          0x00000400
#define FF_DEBUG_MMCO        0x00000800
#define FF_DEBUG_BUGS        0x00001000
#if FF_API_DEBUG_MV
#define FF_DEBUG_VIS_QP      0x00002000 ///< only access through AVOptions from outside libavcodec
#define FF_DEBUG_VIS_MB_TYPE 0x00004000 ///< only access through AVOptions from outside libavcodec
#endif
#define FF_DEBUG_BUFFERS     0x00008000
#define FF_DEBUG_THREADS     0x00010000
#define FF_DEBUG_NOMC        0x01000000

#if FF_API_DEBUG_MV
    /**
     * debug motion vector visualization (see FF_DEBUG_VIS_MV_* below)
     * Code outside libavcodec should access this field using AVOptions
     * - encoding: Set by user.
     * - decoding: Set by user.
     */
    int debug_mv;
#define FF_DEBUG_VIS_MV_P_FOR  0x00000001 // visualize forward predicted MVs of P frames
#define FF_DEBUG_VIS_MV_B_FOR  0x00000002 // visualize forward predicted MVs of B frames
#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 // visualize backward predicted MVs of B frames
#endif

    /**
     * Error recognition; may misdetect some more or less valid parts as errors.
     * Combination of the AV_EF_* flags below.
     * - encoding: unused
     * - decoding: Set by user.
     */
    int err_recognition;

/**
 * Verify checksums embedded in the bitstream (could be of either encoded or
 * decoded data, depending on the codec) and print an error message on mismatch.
 * If AV_EF_EXPLODE is also set, a mismatching checksum will result in the
 * decoder returning an error.
 */
#define AV_EF_CRCCHECK  (1<<0)
#define AV_EF_BITSTREAM (1<<1)          ///< detect bitstream specification deviations
#define AV_EF_BUFFER    (1<<2)          ///< detect improper bitstream length
#define AV_EF_EXPLODE   (1<<3)          ///< abort decoding on minor error detection

#define AV_EF_IGNORE_ERR (1<<15)        ///< ignore errors and continue
#define AV_EF_CAREFUL    (1<<16)        ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors
#define AV_EF_COMPLIANT  (1<<17)        ///< consider all spec non compliances as errors
#define AV_EF_AGGRESSIVE (1<<18)        ///< consider things that a sane encoder should not do as an error


    /**
     * opaque 64bit number (generally a PTS) that will be reordered and
     * output in AVFrame.reordered_opaque
     * - encoding: unused
     * - decoding: Set by user.
     */
    int64_t reordered_opaque;

    /**
     * Hardware accelerator in use
     * - encoding: unused.
     * - decoding: Set by libavcodec
     */
    struct AVHWAccel *hwaccel;

    /**
     * Hardware accelerator context.
     * For some hardware accelerators, a global context needs to be
     * provided by the user. In that case, this holds display-dependent
     * data FFmpeg cannot instantiate itself. Please refer to the
     * FFmpeg HW accelerator documentation to know how to fill this.
     * e.g. for VA API, this is a struct vaapi_context.
     * - encoding: unused
     * - decoding: Set by user
     */
    void *hwaccel_context;

    /**
     * error
     * - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR.
     * - decoding: unused
     */
    uint64_t error[AV_NUM_DATA_POINTERS];

    /**
     * DCT algorithm, see FF_DCT_* below
     * - encoding: Set by user.
     * - decoding: unused
     */
    int dct_algo;
#define FF_DCT_AUTO    0
#define FF_DCT_FASTINT 1
#if FF_API_UNUSED_MEMBERS
#define FF_DCT_INT     2
#endif /* FF_API_UNUSED_MEMBERS */
#define FF_DCT_MMX     3
#define FF_DCT_ALTIVEC 5
#define FF_DCT_FAAN    6

    /**
     * IDCT algorithm, see FF_IDCT_* below.
     * - encoding: Set by user.
     * - decoding: Set by user.
     */
    int idct_algo;
#define FF_IDCT_AUTO          0
#define FF_IDCT_INT           1
#define FF_IDCT_SIMPLE        2
#define FF_IDCT_SIMPLEMMX     3
#define FF_IDCT_ARM           7
#define FF_IDCT_ALTIVEC       8
#if FF_API_ARCH_SH4
#define FF_IDCT_SH4           9
#endif
#define FF_IDCT_SIMPLEARM     10
#if FF_API_UNUSED_MEMBERS
#define FF_IDCT_IPP           13
#endif /* FF_API_UNUSED_MEMBERS */
#define FF_IDCT_XVID          14
#if FF_API_IDCT_XVIDMMX
#define FF_IDCT_XVIDMMX       14
#endif /* FF_API_IDCT_XVIDMMX */
#define FF_IDCT_SIMPLEARMV5TE 16
#define FF_IDCT_SIMPLEARMV6   17
#if FF_API_ARCH_SPARC
#define FF_IDCT_SIMPLEVIS     18
#endif
#define FF_IDCT_FAAN          20
#define FF_IDCT_SIMPLENEON    22
#if FF_API_ARCH_ALPHA
#define FF_IDCT_SIMPLEALPHA   23
#endif
#define FF_IDCT_SIMPLEAUTO    128

    /**
     * bits per sample/pixel from the demuxer (needed for huffyuv).
     * - encoding: Set by libavcodec.
     * - decoding: Set by user.
     */
     int bits_per_coded_sample;

    /**
     * Bits per sample/pixel of internal libavcodec pixel/sample format.
     * - encoding: set by user.
     * - decoding: set by libavcodec.
     */
    int bits_per_raw_sample;

#if FF_API_LOWRES
    /**
     * low resolution decoding, 1-> 1/2 size, 2->1/4 size
     * - encoding: unused
     * - decoding: Set by user.
     * Code outside libavcodec should access this field using:
     * av_codec_{get,set}_lowres(avctx)
     */
     int lowres;
#endif

    /**
     * the picture in the bitstream
     * - encoding: Set by libavcodec.
     * - decoding: unused
     */
    AVFrame *coded_frame;

    /**
     * thread count
     * is used to decide how many independent tasks should be passed to execute()
     * - encoding: Set by user.
     * - decoding: Set by user.
     */
    int thread_count;

    /**
     * Which multithreading methods to use.
     * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread,
     * so clients which cannot provide future frames should not use it.
     *
     * - encoding: Set by user, otherwise the default is used.
     * - decoding: Set by user, otherwise the default is used.
     */
    int thread_type;
#define FF_THREAD_FRAME   1 ///< Decode more than one frame at once
#define FF_THREAD_SLICE   2 ///< Decode more than one part of a single frame at once

    /**
     * Which multithreading methods are in use by the codec.
     * - encoding: Set by libavcodec.
     * - decoding: Set by libavcodec.
     */
    int active_thread_type;

    /**
     * Set by the client if its custom get_buffer() callback can be called
     * synchronously from another thread, which allows faster multithreaded decoding.
     * draw_horiz_band() will be called from other threads regardless of this setting.
     * Ignored if the default get_buffer() is used.
     * - encoding: Set by user.
     * - decoding: Set by user.
     */
    int thread_safe_callbacks;

    /**
     * The codec may call this to execute several independent things.
     * It will return only after finishing all tasks.
     * The user may replace this with some multithreaded implementation,
     * the default implementation will execute the parts serially.
     * @param count the number of things to execute
     * - encoding: Set by libavcodec, user can override.
     * - decoding: Set by libavcodec, user can override.
     */
    int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size);

    /**
     * The codec may call this to execute several independent things.
     * It will return only after finishing all tasks.
     * The user may replace this with some multithreaded implementation,
     * the default implementation will execute the parts serially.
     * Also see avcodec_thread_init and e.g. the --enable-pthread configure option.
     * @param c context passed also to func
     * @param count the number of things to execute
     * @param arg2 argument passed unchanged to func
     * @param ret return values of executed functions, must have space for "count" values. May be NULL.
     * @param func function that will be called count times, with jobnr from 0 to count-1.
     *             threadnr will be in the range 0 to c->thread_count-1 < MAX_THREADS and so that no
     *             two instances of func executing at the same time will have the same threadnr.
     * @return always 0 currently, but code should handle a future improvement where when any call to func
     *         returns < 0 no further calls to func may be done and < 0 is returned.
     * - encoding: Set by libavcodec, user can override.
     * - decoding: Set by libavcodec, user can override.
     */
    int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count);

#if FF_API_THREAD_OPAQUE
    /**
     * @deprecated this field should not be used from outside of lavc
     */
    attribute_deprecated
    void *thread_opaque;
#endif

    /**
     * noise vs. sse weight for the nsse comparison function
     * - encoding: Set by user.
     * - decoding: unused
     */
     int nsse_weight;

    /**
     * profile
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
     int profile;
#define FF_PROFILE_UNKNOWN -99
#define FF_PROFILE_RESERVED -100

#define FF_PROFILE_AAC_MAIN 0
#define FF_PROFILE_AAC_LOW  1
#define FF_PROFILE_AAC_SSR  2
#define FF_PROFILE_AAC_LTP  3
#define FF_PROFILE_AAC_HE   4
#define FF_PROFILE_AAC_HE_V2 28
#define FF_PROFILE_AAC_LD   22
#define FF_PROFILE_AAC_ELD  38
#define FF_PROFILE_MPEG2_AAC_LOW 128
#define FF_PROFILE_MPEG2_AAC_HE  131

#define FF_PROFILE_DTS         20
#define FF_PROFILE_DTS_ES      30
#define FF_PROFILE_DTS_96_24   40
#define FF_PROFILE_DTS_HD_HRA  50
#define FF_PROFILE_DTS_HD_MA   60

#define FF_PROFILE_MPEG2_422    0
#define FF_PROFILE_MPEG2_HIGH   1
#define FF_PROFILE_MPEG2_SS     2
#define FF_PROFILE_MPEG2_SNR_SCALABLE  3
#define FF_PROFILE_MPEG2_MAIN   4
#define FF_PROFILE_MPEG2_SIMPLE 5

#define FF_PROFILE_H264_CONSTRAINED  (1<<9)  // 8+1; constraint_set1_flag
#define FF_PROFILE_H264_INTRA        (1<<11) // 8+3; constraint_set3_flag

#define FF_PROFILE_H264_BASELINE             66
#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED)
#define FF_PROFILE_H264_MAIN                 77
#define FF_PROFILE_H264_EXTENDED             88
#define FF_PROFILE_H264_HIGH                 100
#define FF_PROFILE_H264_HIGH_10              110
#define FF_PROFILE_H264_HIGH_10_INTRA        (110|FF_PROFILE_H264_INTRA)
#define FF_PROFILE_H264_HIGH_422             122
#define FF_PROFILE_H264_HIGH_422_INTRA       (122|FF_PROFILE_H264_INTRA)
#define FF_PROFILE_H264_HIGH_444             144
#define FF_PROFILE_H264_HIGH_444_PREDICTIVE  244
#define FF_PROFILE_H264_HIGH_444_INTRA       (244|FF_PROFILE_H264_INTRA)
#define FF_PROFILE_H264_CAVLC_444            44

#define FF_PROFILE_VC1_SIMPLE   0
#define FF_PROFILE_VC1_MAIN     1
#define FF_PROFILE_VC1_COMPLEX  2
#define FF_PROFILE_VC1_ADVANCED 3

#define FF_PROFILE_MPEG4_SIMPLE                     0
#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE            1
#define FF_PROFILE_MPEG4_CORE                       2
#define FF_PROFILE_MPEG4_MAIN                       3
#define FF_PROFILE_MPEG4_N_BIT                      4
#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE           5
#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION      6
#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE     7
#define FF_PROFILE_MPEG4_HYBRID                     8
#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME         9
#define FF_PROFILE_MPEG4_CORE_SCALABLE             10
#define FF_PROFILE_MPEG4_ADVANCED_CODING           11
#define FF_PROFILE_MPEG4_ADVANCED_CORE             12
#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13
#define FF_PROFILE_MPEG4_SIMPLE_STUDIO             14
#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE           15

#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0   0
#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1   1
#define FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION  2
#define FF_PROFILE_JPEG2000_DCINEMA_2K              3
#define FF_PROFILE_JPEG2000_DCINEMA_4K              4


#define FF_PROFILE_HEVC_MAIN                        1
#define FF_PROFILE_HEVC_MAIN_10                     2
#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE          3
#define FF_PROFILE_HEVC_REXT                        4

    /**
     * level
     * - encoding: Set by user.
     * - decoding: Set by libavcodec.
     */
     int level;
#define FF_LEVEL_UNKNOWN -99

    /**
     * Skip loop filtering for selected frames.
     * - encoding: unused
     * - decoding: Set by user.
     */
    enum AVDiscard skip_loop_filter;

    /**
     * Skip IDCT/dequantization for selected frames.
     * - encoding: unused
     * - decoding: Set by user.
     */
    enum AVDiscard skip_idct;

    /**
     * Skip decoding for selected frames.
     * - encoding: unused
     * - decoding: Set by user.
     */
    enum AVDiscard skip_frame;

    /**
     * Header containing style information for text subtitles.
     * For SUBTITLE_ASS subtitle type, it should contain the whole ASS
     * [Script Info] and [V4+ Styles] section, plus the [Events] line and
     * the Format line following. It shouldn't include any Dialogue line.
     * - encoding: Set/allocated/freed by user (before avcodec_open2())
     * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2())
     */
    uint8_t *subtitle_header;
    int subtitle_header_size;

#if FF_API_ERROR_RATE
    /**
     * @deprecated use the 'error_rate' private AVOption of the mpegvideo
     * encoders
     */
    attribute_deprecated
    int error_rate;
#endif

#if FF_API_CODEC_PKT
    /**
     * @deprecated this field is not supposed to be accessed from outside lavc
     */
    attribute_deprecated
    AVPacket *pkt;
#endif

    /**
     * VBV delay coded in the last frame (in periods of a 27 MHz clock).
     * Used for compliant TS muxing.
     * - encoding: Set by libavcodec.
     * - decoding: unused.
     */
    uint64_t vbv_delay;

    /**
     * Encoding only. Allow encoders to output packets that do not contain any
     * encoded data, only side data.
     *
     * Some encoders need to output such packets, e.g. to update some stream
     * parameters at the end of encoding.
     *
     * All callers are strongly recommended to set this option to 1 and update
     * their code to deal with such packets, since this behaviour may become
     * always enabled in the future (then this option will be deprecated and
     * later removed). To avoid ABI issues when this happens, the callers should
     * use AVOptions to set this field.
     */
    int side_data_only_packets;

    /**
     * Audio only. The number of "priming" samples (padding) inserted by the
     * encoder at the beginning of the audio. I.e. this number of leading
     * decoded samples must be discarded by the caller to get the original audio
     * without leading padding.
     *
     * - decoding: unused
     * - encoding: Set by libavcodec. The timestamps on the output packets are
     *             adjusted by the encoder so that they always refer to the
     *             first sample of the data actually contained in the packet,
     *             including any added padding.  E.g. if the timebase is
     *             1/samplerate and the timestamp of the first input sample is
     *             0, the timestamp of the first output packet will be
     *             -initial_padding.
     */
    int initial_padding;

    /**
     * - decoding: For codecs that store a framerate value in the compressed
     *             bitstream, the decoder may export it here. { 0, 1} when
     *             unknown.
     * - encoding: unused
     */
    AVRational framerate;

    /**
     * Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
     * - encoding: unused.
     * - decoding: Set by libavcodec before calling get_format()
     */
    enum AVPixelFormat sw_pix_fmt;

    /**
     * Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
     * Code outside libavcodec should access this field using:
     * av_codec_{get,set}_pkt_timebase(avctx)
     * - encoding: unused.
     * - decoding: set by user.
     */
    AVRational pkt_timebase;

    /**
     * AVCodecDescriptor
     * Code outside libavcodec should access this field using:
     * av_codec_{get,set}_codec_descriptor(avctx)
     * - encoding: unused.
     * - decoding: set by libavcodec.
     */
    const AVCodecDescriptor *codec_descriptor;

#if !FF_API_LOWRES
    /**
     * low resolution decoding, 1-> 1/2 size, 2->1/4 size
     * - encoding: unused
     * - decoding: Set by user.
     * Code outside libavcodec should access this field using:
     * av_codec_{get,set}_lowres(avctx)
     */
     int lowres;
#endif

    /**
     * Current statistics for PTS correction.
     * - decoding: maintained and used by libavcodec, not intended to be used by user apps
     * - encoding: unused
     */
    int64_t pts_correction_num_faulty_pts; ///< Number of incorrect PTS values so far
    int64_t pts_correction_num_faulty_dts; ///< Number of incorrect DTS values so far
    int64_t pts_correction_last_pts;       ///< PTS of the last frame
    int64_t pts_correction_last_dts;       ///< DTS of the last frame

    /**
     * Character encoding of the input subtitles file.
     * - decoding: set by user
     * - encoding: unused
     */
    char *sub_charenc;

    /**
     * Subtitles character encoding mode. Formats or codecs might be adjusting
     * this setting (if they are doing the conversion themselves for instance).
     * - decoding: set by libavcodec
     * - encoding: unused
     */
    int sub_charenc_mode;
#define FF_SUB_CHARENC_MODE_DO_NOTHING  -1  ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for instance)
#define FF_SUB_CHARENC_MODE_AUTOMATIC    0  ///< libavcodec will select the mode itself
#define FF_SUB_CHARENC_MODE_PRE_DECODER  1  ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv

    /**
     * Skip processing alpha if supported by codec.
     * Note that if the format uses pre-multiplied alpha (common with VP6,
     * and recommended due to better video quality/compression)
     * the image will look as if alpha-blended onto a black background.
     * However for formats that do not use pre-multiplied alpha
     * there might be serious artefacts (though e.g. 
libswscale currently
     * assumes pre-multiplied alpha anyway).
     * Code outside libavcodec should access this field using AVOptions
     *
     * - decoding: set by user
     * - encoding: unused
     */
    int skip_alpha;

    /**
     * Number of samples to skip after a discontinuity
     * - decoding: unused
     * - encoding: set by libavcodec
     */
    int seek_preroll;

#if !FF_API_DEBUG_MV
    /**
     * debug motion vectors
     * Code outside libavcodec should access this field using AVOptions
     * - encoding: Set by user.
     * - decoding: Set by user.
     */
    int debug_mv;
#define FF_DEBUG_VIS_MV_P_FOR  0x00000001 // visualize forward predicted MVs of P frames
#define FF_DEBUG_VIS_MV_B_FOR  0x00000002 // visualize forward predicted MVs of B frames
#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 // visualize backward predicted MVs of B frames
#endif

    /**
     * custom intra quantization matrix
     * Code outside libavcodec should access this field using av_codec_g/set_chroma_intra_matrix()
     * - encoding: Set by user, can be NULL.
     * - decoding: unused.
     */
    uint16_t *chroma_intra_matrix;

    /**
     * dump format separator.
     * can be ", " or "\n      " or anything else
     * Code outside libavcodec should access this field using AVOptions
     * (NO direct access).
     * - encoding: Set by user.
     * - decoding: Set by user.
     */
    uint8_t *dump_separator;

    /**
     * ',' separated list of allowed decoders.
     * If NULL then all are allowed
     * - encoding: unused
     * - decoding: set by user through AVOptions (NO direct access)
     */
    char *codec_whitelist;
} AVCodecContext;

AVRational av_codec_get_pkt_timebase         (const AVCodecContext *avctx);
void       av_codec_set_pkt_timebase         (AVCodecContext *avctx, AVRational val);

const AVCodecDescriptor *av_codec_get_codec_descriptor(const AVCodecContext *avctx);
void                     av_codec_set_codec_descriptor(AVCodecContext *avctx, const AVCodecDescriptor *desc);

int  av_codec_get_lowres(const AVCodecContext *avctx);
void av_codec_set_lowres(AVCodecContext *avctx, int val);

int  av_codec_get_seek_preroll(const AVCodecContext *avctx);
void av_codec_set_seek_preroll(AVCodecContext *avctx, int val);

uint16_t *av_codec_get_chroma_intra_matrix(const AVCodecContext *avctx);
void av_codec_set_chroma_intra_matrix(AVCodecContext *avctx, uint16_t *val);

/**
 * AVProfile.
 */
typedef struct AVProfile {
    int profile;
    const char *name; ///< short name for the profile
} AVProfile;

typedef struct AVCodecDefault AVCodecDefault;

struct AVSubtitle;

/**
 * AVCodec.
 */
typedef struct AVCodec {
    /**
     * Name of the codec implementation.
     * The name is globally unique among encoders and among decoders (but an
     * encoder and a decoder can share the same name).
     * This is the primary way to find a codec from the user perspective.
     */
    const char *name;
    /**
     * Descriptive name for the codec, meant to be more human readable than name.
     * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
     */
    const char *long_name;
    enum AVMediaType type;
    enum AVCodecID id;
    /**
     * Codec capabilities.
     * see CODEC_CAP_*
     */
    int capabilities;
    const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
    const enum AVPixelFormat *pix_fmts;     ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
    const int *supported_samplerates;       ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
    const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
    const uint64_t *channel_layouts;         ///< array of supported channel layouts, 
or NULL if unknown. array is terminated by 0
#if FF_API_LOWRES
    uint8_t max_lowres;                     ///< maximum value for lowres supported by the decoder, no direct access, use av_codec_get_max_lowres()
#endif
    const AVClass *priv_class;              ///< AVClass for the private context
    const AVProfile *profiles;              ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}

    /*****************************************************************
     * No fields below this line are part of the public API. They
     * may not be used outside of libavcodec and can be changed and
     * removed at will.
     * New public fields should be added right above.
     *****************************************************************
     */
    int priv_data_size;
    struct AVCodec *next;
    /**
     * @name Frame-level threading support functions
     * @{
     */
    /**
     * If defined, called on thread contexts when they are created.
     * If the codec allocates writable tables in init(), re-allocate them here.
     * priv_data will be set to a copy of the original.
     */
    int (*init_thread_copy)(AVCodecContext *);
    /**
     * Copy necessary context variables from a previous thread context to the current one.
     * If not defined, the next thread will start automatically; otherwise, the codec
     * must call ff_thread_finish_setup().
     *
     * dst and src will (rarely) point to the same context, in which case memcpy should be skipped.
     */
    int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src);
    /** @} */

    /**
     * Private codec-specific defaults.
     */
    const AVCodecDefault *defaults;

    /**
     * Initialize codec static data, called from avcodec_register().
     */
    void (*init_static_data)(struct AVCodec *codec);

    int (*init)(AVCodecContext *);
    int (*encode_sub)(AVCodecContext *, uint8_t *buf, int buf_size,
                      const struct AVSubtitle *sub);
    /**
     * Encode data to an AVPacket.
     *
     * @param      avctx          codec context
     * @param      avpkt          output AVPacket (may contain a user-provided buffer)
     * @param[in]  frame          AVFrame containing the raw data to be encoded
     * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
     *                            non-empty packet was returned in avpkt.
     * @return 0 on success, negative error code on failure
     */
    int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame,
                   int *got_packet_ptr);
    int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt);
    int (*close)(AVCodecContext *);
    /**
     * Flush buffers.
     * Will be called when seeking
     */
    void (*flush)(AVCodecContext *);
} AVCodec;

int av_codec_get_max_lowres(const AVCodec *codec);

struct MpegEncContext;

/**
 * @defgroup lavc_hwaccel AVHWAccel
 * @{
 */
typedef struct AVHWAccel {
    /**
     * Name of the hardware accelerated codec.
     * The name is globally unique among encoders and among decoders (but an
     * encoder and a decoder can share the same name).
     */
    const char *name;

    /**
     * Type of codec implemented by the hardware accelerator.
     *
     * See AVMEDIA_TYPE_xxx
     */
    enum AVMediaType type;

    /**
     * Codec implemented by the hardware accelerator.
     *
     * See AV_CODEC_ID_xxx
     */
    enum AVCodecID id;

    /**
     * Supported pixel format.
     *
     * Only hardware accelerated formats are supported here.
     */
    enum AVPixelFormat pix_fmt;

    /**
     * Hardware accelerated codec capabilities.
     * see FF_HWACCEL_CODEC_CAP_*
     */
    int capabilities;

    /*****************************************************************
     * No fields 
below this line are part of the public API. They
     * may not be used outside of libavcodec and can be changed and
     * removed at will.
     * New public fields should be added right above.
     *****************************************************************
     */
    struct AVHWAccel *next;

    /**
     * Allocate a custom buffer
     */
    int (*alloc_frame)(AVCodecContext *avctx, AVFrame *frame);

    /**
     * Called at the beginning of each frame or field picture.
     *
     * Meaningful frame information (codec specific) is guaranteed to
     * be parsed at this point. This function is mandatory.
     *
     * Note that buf can be NULL along with buf_size set to 0.
     * Otherwise, this means the whole frame is available at this point.
     *
     * @param avctx the codec context
     * @param buf the frame data buffer base
     * @param buf_size the size of the frame in bytes
     * @return zero if successful, a negative value otherwise
     */
    int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);

    /**
     * Callback for each slice.
     *
     * Meaningful slice information (codec specific) is guaranteed to
     * be parsed at this point. This function is mandatory.
     * The only exception is XvMC, that works on MB level.
     *
     * @param avctx the codec context
     * @param buf the slice data buffer base
     * @param buf_size the size of the slice in bytes
     * @return zero if successful, a negative value otherwise
     */
    int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);

    /**
     * Called at the end of each frame or field picture.
     *
     * The whole picture is parsed at this point and can now be sent
     * to the hardware accelerator. This function is mandatory.
     *
     * @param avctx the codec context
     * @return zero if successful, a negative value otherwise
     */
    int (*end_frame)(AVCodecContext *avctx);

    /**
     * Size of per-frame hardware accelerator private data.
     *
     * Private data is allocated with av_mallocz() before
     * AVCodecContext.get_buffer() and deallocated after
     * AVCodecContext.release_buffer().
     */
    int frame_priv_data_size;

    /**
     * Called for every Macroblock in a slice.
     *
     * XvMC uses it to replace the ff_mpv_decode_mb().
     * Instead of decoding to raw picture, MB parameters are
     * stored in an array provided by the video driver.
     *
     * @param s the mpeg context
     */
    void (*decode_mb)(struct MpegEncContext *s);

    /**
     * Initialize the hwaccel private data.
     *
     * This will be called from ff_get_format(), after hwaccel and
     * hwaccel_context are set and the hwaccel private data in AVCodecInternal
     * is allocated.
     */
    int (*init)(AVCodecContext *avctx);

    /**
     * Uninitialize the hwaccel private data.
     *
     * This will be called from get_format() or avcodec_close(), after hwaccel
     * and hwaccel_context are already uninitialized.
     */
    int (*uninit)(AVCodecContext *avctx);

    /**
     * Size of the private data to allocate in
     * AVCodecInternal.hwaccel_priv_data.
     */
    int priv_data_size;
} AVHWAccel;

/**
 * Hardware acceleration should be used for decoding even if the codec level
 * used is unknown or higher than the maximum supported level reported by the
 * hardware driver.
 */
#define AV_HWACCEL_FLAG_IGNORE_LEVEL (1 << 0)

/**
 * Hardware acceleration can output YUV pixel formats with a different chroma
 * sampling than 4:2:0 and/or other than 8 bits per component.
 */
#define AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH (1 << 1)

/**
 * @}
 */

/**
 * @defgroup lavc_picture AVPicture
 *
 * Functions for working with AVPicture
 * @{
 */

/**
 * Picture data structure.
 *
 * Up to four components can be stored into it, the last component is
 * alpha.
 */
typedef struct AVPicture {
    uint8_t *data[AV_NUM_DATA_POINTERS];    ///< pointers to the image data planes
    int linesize[AV_NUM_DATA_POINTERS];     ///< number of bytes per line
} AVPicture;

/**
 * @}
 */

enum AVSubtitleType {
    SUBTITLE_NONE,

    SUBTITLE_BITMAP,                ///< A bitmap, pict will be set

    /**
     * Plain text, the text field must be set by the decoder and is
     * authoritative. ass and pict fields may contain approximations.
     */
    SUBTITLE_TEXT,

    /**
     * Formatted text, the ass field must be set by the decoder and is
     * authoritative. pict and text fields may contain approximations.
     */
    SUBTITLE_ASS,
};

#define AV_SUBTITLE_FLAG_FORCED 0x00000001

typedef struct AVSubtitleRect {
    int x;         ///< top left corner  of pict, undefined when pict is not set
    int y;         ///< top left corner  of pict, undefined when pict is not set
    int w;         ///< width            of pict, undefined when pict is not set
    int h;         ///< height           of pict, undefined when pict is not set
    int nb_colors; ///< number of colors in pict, undefined when pict is not set

    /**
     * data+linesize for the bitmap of this subtitle.
     * can be set for text/ass as well once they are rendered
     */
    AVPicture pict;
    enum AVSubtitleType type;

    char *text;                     ///< 0 terminated plain UTF-8 text

    /**
     * 0 terminated ASS/SSA compatible event line.
     * The presentation of this is unaffected by the other values in this
     * struct.
     */
    char *ass;

    int flags;
} AVSubtitleRect;

typedef struct AVSubtitle {
    uint16_t format; /* 0 = graphics */
    uint32_t start_display_time; /* relative to packet pts, in ms */
    uint32_t end_display_time; /* relative to packet pts, in ms */
    unsigned num_rects;
    AVSubtitleRect **rects;
    int64_t pts;    ///< Same as packet pts, in AV_TIME_BASE
} AVSubtitle;

/**
 * If c is NULL, returns the first registered codec,
 * if c is non-NULL, returns the next registered codec after c,
 * or NULL if c is the last one.
 */
AVCodec *av_codec_next(const AVCodec *c);

/**
 * Return the LIBAVCODEC_VERSION_INT constant.
 */
unsigned avcodec_version(void);

/**
 * Return the libavcodec build-time configuration.
 */
const char *avcodec_configuration(void);

/**
 * Return the libavcodec license.
 */
const char *avcodec_license(void);

/**
 * Register the codec and initialize libavcodec.
 *
 * @warning either this function or avcodec_register_all() must be called
 * before any other libavcodec functions.
 *
 * @see avcodec_register_all()
 */
void avcodec_register(AVCodec *codec);

/**
 * Register all the codecs, parsers and bitstream filters which were enabled at
 * configuration time. If you do not call this function you can select exactly
 * which formats you want to support, by using the individual registration
 * functions.
 *
 * @see avcodec_register
 * @see av_register_codec_parser
 * @see av_register_bitstream_filter
 */
void avcodec_register_all(void);

/**
 * Allocate an AVCodecContext and set its fields to default values. The
 * resulting struct should be freed with avcodec_free_context().
 *
 * @param codec if non-NULL, allocate private data and initialize defaults
 *              for the given codec. It is illegal to then call avcodec_open2()
 *              with a different codec.
 *              If NULL, then the codec-specific defaults won't be initialized,
 *              which may result in suboptimal default settings (this is
 *              important mainly for encoders, e.g. libx264).
 *
 * @return An AVCodecContext filled with default values or NULL on failure.
 * @see avcodec_get_context_defaults
 */
AVCodecContext *avcodec_alloc_context3(const AVCodec *codec);

/**
 * Free the codec context and everything associated with it and write NULL to
 * the provided pointer.
 */
void avcodec_free_context(AVCodecContext **avctx);

/**
 * Set the fields of the given AVCodecContext to default values corresponding
 * to the given codec (defaults may be codec-dependent).
 *
 * Do not call this function if a non-NULL codec has been passed
 * to avcodec_alloc_context3() that allocated this AVCodecContext.
 * If codec is non-NULL, it is illegal to call avcodec_open2() with a
 * different codec on this AVCodecContext.
 */
int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec);

/**
 * Get the AVClass for AVCodecContext. It can be used in combination with
 * AV_OPT_SEARCH_FAKE_OBJ for examining options.
 *
 * @see av_opt_find().
 */
const AVClass *avcodec_get_class(void);

/**
 * Get the AVClass for AVFrame. It can be used in combination with
 * AV_OPT_SEARCH_FAKE_OBJ for examining options.
 *
 * @see av_opt_find().
 */
const AVClass *avcodec_get_frame_class(void);

/**
 * Get the AVClass for AVSubtitleRect. It can be used in combination with
 * AV_OPT_SEARCH_FAKE_OBJ for examining options.
 *
 * @see av_opt_find().
 */
const AVClass *avcodec_get_subtitle_rect_class(void);

/**
 * Copy the settings of the source AVCodecContext into the destination
 * AVCodecContext. The resulting destination codec context will be
 * unopened, i.e. you are required to call avcodec_open2() before you
 * can use this AVCodecContext to decode/encode video/audio data.
 *
 * @param dest target codec context, should be initialized with
 *             avcodec_alloc_context3(NULL), but otherwise uninitialized
 * @param src source codec context
 * @return AVERROR() on error (e.g. memory allocation error), 0 on success
 */
int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src);

#if FF_API_AVFRAME_LAVC
/**
 * @deprecated use av_frame_alloc()
 */
attribute_deprecated
AVFrame *avcodec_alloc_frame(void);

/**
 * Set the fields of the given AVFrame to default values.
 *
 * @param frame The AVFrame of which the fields should be set to default values.
 *
 * @deprecated use av_frame_unref()
 */
attribute_deprecated
void avcodec_get_frame_defaults(AVFrame *frame);

/**
 * Free the frame and any dynamically allocated objects in it,
 * e.g. extended_data.
 *
 * @param frame frame to be freed. The pointer will be set to NULL.
 *
 * @warning this function does NOT free the data buffers themselves
 * (it does not know how, since they might have been allocated with
 *  a custom get_buffer()).
 *
 * @deprecated use av_frame_free()
 */
attribute_deprecated
void avcodec_free_frame(AVFrame **frame);
#endif

/**
 * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
 * function the context has to be allocated with avcodec_alloc_context3().
 *
 * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
 * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
 * retrieving a codec.
 *
 * @warning This function is not thread safe!
 *
 * @note Always call this function before using decoding routines (such as
 * @ref avcodec_decode_video2()).
 *
 * @code
 * avcodec_register_all();
 * av_dict_set(&opts, "b", "2.5M", 0);
 * codec = avcodec_find_decoder(AV_CODEC_ID_H264);
 * if (!codec)
 *     exit(1);
 *
 * context = avcodec_alloc_context3(codec);
 *
 * if (avcodec_open2(context, codec, opts) < 0)
 *     exit(1);
 * @endcode
 *
 * @param avctx The context to initialize.
 * @param codec The codec to open this context for. 
If a non-NULL codec has been\n *              previously passed to avcodec_alloc_context3() or\n *              avcodec_get_context_defaults3() for this context, then this\n *              parameter MUST be either NULL or equal to the previously passed\n *              codec.\n * @param options A dictionary filled with AVCodecContext and codec-private options.\n *                On return this object will be filled with options that were not found.\n *\n * @return zero on success, a negative value on error\n * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(),\n *      av_dict_set(), av_opt_find().\n */\nint avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);\n\n/**\n * Close a given AVCodecContext and free all the data associated with it\n * (but not the AVCodecContext itself).\n *\n * Calling this function on an AVCodecContext that hasn't been opened will free\n * the codec-specific data allocated in avcodec_alloc_context3() /\n * avcodec_get_context_defaults3() with a non-NULL codec. 
Subsequent calls will\n * do nothing.\n */\nint avcodec_close(AVCodecContext *avctx);\n\n/**\n * Free all allocated data in the given subtitle struct.\n *\n * @param sub AVSubtitle to free.\n */\nvoid avsubtitle_free(AVSubtitle *sub);\n\n/**\n * @}\n */\n\n/**\n * @addtogroup lavc_packet\n * @{\n */\n\n#if FF_API_DESTRUCT_PACKET\n/**\n * Default packet destructor.\n * @deprecated use the AVBuffer API instead\n */\nattribute_deprecated\nvoid av_destruct_packet(AVPacket *pkt);\n#endif\n\n/**\n * Initialize optional fields of a packet with default values.\n *\n * Note, this does not touch the data and size members, which have to be\n * initialized separately.\n *\n * @param pkt packet\n */\nvoid av_init_packet(AVPacket *pkt);\n\n/**\n * Allocate the payload of a packet and initialize its fields with\n * default values.\n *\n * @param pkt packet\n * @param size wanted payload size\n * @return 0 if OK, AVERROR_xxx otherwise\n */\nint av_new_packet(AVPacket *pkt, int size);\n\n/**\n * Reduce packet size, correctly zeroing padding\n *\n * @param pkt packet\n * @param size new size\n */\nvoid av_shrink_packet(AVPacket *pkt, int size);\n\n/**\n * Increase packet size, correctly zeroing padding\n *\n * @param pkt packet\n * @param grow_by number of bytes by which to increase the size of the packet\n */\nint av_grow_packet(AVPacket *pkt, int grow_by);\n\n/**\n * Initialize a reference-counted packet from av_malloc()ed data.\n *\n * @param pkt packet to be initialized. This function will set the data, size,\n *        buf and destruct fields, all others are left untouched.\n * @param data Data allocated by av_malloc() to be used as packet data. If this\n *        function returns successfully, the data is owned by the underlying AVBuffer.\n *        The caller may not access the data through other means.\n * @param size size of data in bytes, without the padding. I.e. 
the full buffer\n *        size is assumed to be size + FF_INPUT_BUFFER_PADDING_SIZE.\n *\n * @return 0 on success, a negative AVERROR on error\n */\nint av_packet_from_data(AVPacket *pkt, uint8_t *data, int size);\n\n/**\n * @warning This is a hack - the packet memory allocation stuff is broken. The\n * packet is allocated if it was not really allocated.\n */\nint av_dup_packet(AVPacket *pkt);\n\n/**\n * Copy packet, including contents\n *\n * @return 0 on success, negative AVERROR on fail\n */\nint av_copy_packet(AVPacket *dst, const AVPacket *src);\n\n/**\n * Copy packet side data\n *\n * @return 0 on success, negative AVERROR on fail\n */\nint av_copy_packet_side_data(AVPacket *dst, const AVPacket *src);\n\n/**\n * Free a packet.\n *\n * @param pkt packet to free\n */\nvoid av_free_packet(AVPacket *pkt);\n\n/**\n * Allocate new information of a packet.\n *\n * @param pkt packet\n * @param type side information type\n * @param size side information size\n * @return pointer to fresh allocated data or NULL otherwise\n */\nuint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,\n                                 int size);\n\n/**\n * Shrink the already allocated side data buffer\n *\n * @param pkt packet\n * @param type side information type\n * @param size new side information size\n * @return 0 on success, < 0 on failure\n */\nint av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,\n                               int size);\n\n/**\n * Get side information from packet.\n *\n * @param pkt packet\n * @param type desired side information type\n * @param size pointer for side information size to store (optional)\n * @return pointer to data if present or NULL otherwise\n */\nuint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type,\n                                 int *size);\n\nint av_packet_merge_side_data(AVPacket *pkt);\n\nint av_packet_split_side_data(AVPacket *pkt);\n\n/**\n * Pack a dictionary 
for use in side_data.\n *\n * @param dict The dictionary to pack.\n * @param size pointer to store the size of the returned data\n * @return pointer to data if successful, NULL otherwise\n */\nuint8_t *av_packet_pack_dictionary(AVDictionary *dict, int *size);\n/**\n * Unpack a dictionary from side_data.\n *\n * @param data data from side_data\n * @param size size of the data\n * @param dict the metadata storage dictionary\n * @return 0 on success, < 0 on failure\n */\nint av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict);\n\n\n/**\n * Convenience function to free all the side data stored.\n * All the other fields stay untouched.\n *\n * @param pkt packet\n */\nvoid av_packet_free_side_data(AVPacket *pkt);\n\n/**\n * Setup a new reference to the data described by a given packet\n *\n * If src is reference-counted, setup dst as a new reference to the\n * buffer in src. Otherwise allocate a new buffer in dst and copy the\n * data from src into it.\n *\n * All the other fields are copied from src.\n *\n * @see av_packet_unref\n *\n * @param dst Destination packet\n * @param src Source packet\n *\n * @return 0 on success, a negative AVERROR on error.\n */\nint av_packet_ref(AVPacket *dst, const AVPacket *src);\n\n/**\n * Wipe the packet.\n *\n * Unreference the buffer referenced by the packet and reset the\n * remaining packet fields to their default values.\n *\n * @param pkt The packet to be unreferenced.\n */\nvoid av_packet_unref(AVPacket *pkt);\n\n/**\n * Move every field in src to dst and reset src.\n *\n * @see av_packet_unref\n *\n * @param src Source packet, will be reset\n * @param dst Destination packet\n */\nvoid av_packet_move_ref(AVPacket *dst, AVPacket *src);\n\n/**\n * Copy only \"properties\" fields from src to dst.\n *\n * Properties for the purpose of this function are all the fields\n * beside those related to the packet data (buf, data, size)\n *\n * @param dst Destination packet\n * @param src Source packet\n *\n * 
@return 0 on success AVERROR on failure.\n *\n */\nint av_packet_copy_props(AVPacket *dst, const AVPacket *src);\n\n/**\n * Convert valid timing fields (timestamps / durations) in a packet from one\n * timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be\n * ignored.\n *\n * @param pkt packet on which the conversion will be performed\n * @param tb_src source timebase, in which the timing fields in pkt are\n *               expressed\n * @param tb_dst destination timebase, to which the timing fields will be\n *               converted\n */\nvoid av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst);\n\n/**\n * @}\n */\n\n/**\n * @addtogroup lavc_decoding\n * @{\n */\n\n/**\n * Find a registered decoder with a matching codec ID.\n *\n * @param id AVCodecID of the requested decoder\n * @return A decoder if one was found, NULL otherwise.\n */\nAVCodec *avcodec_find_decoder(enum AVCodecID id);\n\n/**\n * Find a registered decoder with the specified name.\n *\n * @param name name of the requested decoder\n * @return A decoder if one was found, NULL otherwise.\n */\nAVCodec *avcodec_find_decoder_by_name(const char *name);\n\n#if FF_API_GET_BUFFER\nattribute_deprecated int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);\nattribute_deprecated void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);\nattribute_deprecated int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);\n#endif\n\n/**\n * The default callback for AVCodecContext.get_buffer2(). 
It is made public so\n * it can be called by custom get_buffer2() implementations for decoders without\n * CODEC_CAP_DR1 set.\n */\nint avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags);\n\n#if FF_API_EMU_EDGE\n/**\n * Return the amount of padding in pixels which the get_buffer callback must\n * provide around the edge of the image for codecs which do not have the\n * CODEC_FLAG_EMU_EDGE flag.\n *\n * @return Required padding in pixels.\n *\n * @deprecated CODEC_FLAG_EMU_EDGE is deprecated, so this function is no longer\n * needed\n */\nattribute_deprecated\nunsigned avcodec_get_edge_width(void);\n#endif\n\n/**\n * Modify width and height values so that they will result in a memory\n * buffer that is acceptable for the codec if you do not use any horizontal\n * padding.\n *\n * May only be used if a codec with CODEC_CAP_DR1 has been opened.\n */\nvoid avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);\n\n/**\n * Modify width and height values so that they will result in a memory\n * buffer that is acceptable for the codec if you also ensure that all\n * line sizes are a multiple of the respective linesize_align[i].\n *\n * May only be used if a codec with CODEC_CAP_DR1 has been opened.\n */\nvoid avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,\n                               int linesize_align[AV_NUM_DATA_POINTERS]);\n\n/**\n * Converts AVChromaLocation to swscale x/y chroma position.\n *\n * The positions represent the chroma (0,0) position in a coordinates system\n * with luma (0,0) representing the origin and luma(1,1) representing 256,256\n *\n * @param xpos  horizontal chroma sample position\n * @param ypos  vertical   chroma sample position\n */\nint avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos);\n\n/**\n * Converts swscale x/y chroma position to AVChromaLocation.\n *\n * The positions represent the chroma (0,0) position in a coordinates system\n * with luma (0,0) 
representing the origin and luma(1,1) representing 256,256\n *\n * @param xpos  horizontal chroma sample position\n * @param ypos  vertical   chroma sample position\n */\nenum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos);\n\n#if FF_API_OLD_DECODE_AUDIO\n/**\n * Wrapper function which calls avcodec_decode_audio4.\n *\n * @deprecated Use avcodec_decode_audio4 instead.\n *\n * Decode the audio frame of size avpkt->size from avpkt->data into samples.\n * Some decoders may support multiple frames in a single AVPacket, such\n * decoders would then just decode the first frame. In this case,\n * avcodec_decode_audio3 has to be called again with an AVPacket that contains\n * the remaining data in order to decode the second frame etc.\n * If no frame\n * could be outputted, frame_size_ptr is zero. Otherwise, it is the\n * decompressed frame size in bytes.\n *\n * @warning You must set frame_size_ptr to the allocated size of the\n * output buffer before calling avcodec_decode_audio3().\n *\n * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than\n * the actual read bytes because some optimized bitstream readers read 32 or 64\n * bits at once and could read over the end.\n *\n * @warning The end of the input buffer avpkt->data should be set to 0 to ensure that\n * no overreading happens for damaged MPEG streams.\n *\n * @warning You must not provide a custom get_buffer() when using\n * avcodec_decode_audio3().  Doing so will override it with\n * avcodec_default_get_buffer.  Use avcodec_decode_audio4() instead,\n * which does allow the application to provide a custom get_buffer().\n *\n * @note You might have to align the input buffer avpkt->data and output buffer\n * samples. 
The alignment requirements depend on the CPU: On some CPUs it isn't\n * necessary at all, on others it won't work at all if not aligned and on others\n * it will work but it will have an impact on performance.\n *\n * In practice, avpkt->data should have 4 byte alignment at minimum and\n * samples should be 16 byte aligned unless the CPU doesn't need it\n * (AltiVec and SSE do).\n *\n * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay\n * between input and output, these need to be fed with avpkt->data=NULL,\n * avpkt->size=0 at the end to return the remaining frames.\n *\n * @param avctx the codec context\n * @param[out] samples the output buffer, sample type in avctx->sample_fmt\n *                     If the sample format is planar, each channel plane will\n *                     be the same size, with no padding between channels.\n * @param[in,out] frame_size_ptr the output buffer size in bytes\n * @param[in] avpkt The input AVPacket containing the input buffer.\n *            You can create such packet with av_init_packet() and by then setting\n *            data and size, some decoders might in addition need other fields.\n *            All decoders are designed to use the least fields possible though.\n * @return On error a negative value is returned, otherwise the number of bytes\n * used or zero if no frame data was decompressed (used) from the input AVPacket.\n */\nattribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,\n                         int *frame_size_ptr,\n                         AVPacket *avpkt);\n#endif\n\n/**\n * Decode the audio frame of size avpkt->size from avpkt->data into frame.\n *\n * Some decoders may support multiple frames in a single AVPacket. Such\n * decoders would then just decode the first frame and the return value would be\n * less than the packet size. 
In this case, avcodec_decode_audio4 has to be\n * called again with an AVPacket containing the remaining data in order to\n * decode the second frame, etc...  Even if no frames are returned, the packet\n * needs to be fed to the decoder with remaining data until it is completely\n * consumed or an error occurs.\n *\n * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input\n * and output. This means that for some packets they will not immediately\n * produce decoded output and need to be flushed at the end of decoding to get\n * all the decoded data. Flushing is done by calling this function with packets\n * with avpkt->data set to NULL and avpkt->size set to 0 until it stops\n * returning samples. It is safe to flush even those decoders that are not\n * marked with CODEC_CAP_DELAY, then no samples will be returned.\n *\n * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE\n *          larger than the actual read bytes because some optimized bitstream\n *          readers read 32 or 64 bits at once and could read over the end.\n *\n * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()\n * before packets may be fed to the decoder.\n *\n * @param      avctx the codec context\n * @param[out] frame The AVFrame in which to store decoded audio samples.\n *                   The decoder will allocate a buffer for the decoded frame by\n *                   calling the AVCodecContext.get_buffer2() callback.\n *                   When AVCodecContext.refcounted_frames is set to 1, the frame is\n *                   reference counted and the returned reference belongs to the\n *                   caller. The caller must release the frame using av_frame_unref()\n *                   when the frame is no longer needed. 
The caller may safely write\n *                   to the frame if av_frame_is_writable() returns 1.\n *                   When AVCodecContext.refcounted_frames is set to 0, the returned\n *                   reference belongs to the decoder and is valid only until the\n *                   next call to this function or until closing or flushing the\n *                   decoder. The caller may not write to it.\n * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is\n *                           non-zero. Note that this field being set to zero\n *                           does not mean that an error has occurred. For\n *                           decoders with CODEC_CAP_DELAY set, no given decode\n *                           call is guaranteed to produce a frame.\n * @param[in]  avpkt The input AVPacket containing the input buffer.\n *                   At least avpkt->data and avpkt->size should be set. Some\n *                   decoders might also require additional fields to be set.\n * @return A negative error code is returned if an error occurred during\n *         decoding, otherwise the number of bytes consumed from the input\n *         AVPacket is returned.\n */\nint avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame,\n                          int *got_frame_ptr, const AVPacket *avpkt);\n\n/**\n * Decode the video frame of size avpkt->size from avpkt->data into picture.\n * Some decoders may support multiple frames in a single AVPacket, such\n * decoders would then just decode the first frame.\n *\n * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than\n * the actual read bytes because some optimized bitstream readers read 32 or 64\n * bits at once and could read over the end.\n *\n * @warning The end of the input buffer buf should be set to 0 to ensure that\n * no overreading happens for damaged MPEG streams.\n *\n * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay\n * 
between input and output, these need to be fed with avpkt->data=NULL,\n * avpkt->size=0 at the end to return the remaining frames.\n *\n * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()\n * before packets may be fed to the decoder.\n *\n * @param avctx the codec context\n * @param[out] picture The AVFrame in which the decoded video frame will be stored.\n *             Use av_frame_alloc() to get an AVFrame. The codec will\n *             allocate memory for the actual bitmap by calling the\n *             AVCodecContext.get_buffer2() callback.\n *             When AVCodecContext.refcounted_frames is set to 1, the frame is\n *             reference counted and the returned reference belongs to the\n *             caller. The caller must release the frame using av_frame_unref()\n *             when the frame is no longer needed. The caller may safely write\n *             to the frame if av_frame_is_writable() returns 1.\n *             When AVCodecContext.refcounted_frames is set to 0, the returned\n *             reference belongs to the decoder and is valid only until the\n *             next call to this function or until closing or flushing the\n *             decoder. The caller may not write to it.\n *\n * @param[in] avpkt The input AVPacket containing the input buffer.\n *            You can create such packet with av_init_packet() and by then setting\n *            data and size, some decoders might in addition need other fields like\n *            flags&AV_PKT_FLAG_KEY. 
All decoders are designed to use the least\n *            fields possible.\n * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero.\n * @return On error a negative value is returned, otherwise the number of bytes\n * used or zero if no frame could be decompressed.\n */\nint avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,\n                         int *got_picture_ptr,\n                         const AVPacket *avpkt);\n\n/**\n * Decode a subtitle message.\n * Return a negative value on error, otherwise return the number of bytes used.\n * If no subtitle could be decompressed, got_sub_ptr is zero.\n * Otherwise, the subtitle is stored in *sub.\n * Note that CODEC_CAP_DR1 is not available for subtitle codecs. This is for\n * simplicity, because the performance difference is expected to be negligible\n * and reusing a get_buffer written for video codecs would probably perform badly\n * due to a potentially very different allocation pattern.\n *\n * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input\n * and output. This means that for some packets they will not immediately\n * produce decoded output and need to be flushed at the end of decoding to get\n * all the decoded data. Flushing is done by calling this function with packets\n * with avpkt->data set to NULL and avpkt->size set to 0 until it stops\n * returning subtitles. 
It is safe to flush even those decoders that are not\n * marked with CODEC_CAP_DELAY, then no subtitles will be returned.\n *\n * @note The AVCodecContext MUST have been opened with @ref avcodec_open2()\n * before packets may be fed to the decoder.\n *\n * @param avctx the codec context\n * @param[out] sub The Preallocated AVSubtitle in which the decoded subtitle will be stored,\n *                 must be freed with avsubtitle_free if *got_sub_ptr is set.\n * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero.\n * @param[in] avpkt The input AVPacket containing the input buffer.\n */\nint avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,\n                            int *got_sub_ptr,\n                            AVPacket *avpkt);\n\n/**\n * @defgroup lavc_parsing Frame parsing\n * @{\n */\n\nenum AVPictureStructure {\n    AV_PICTURE_STRUCTURE_UNKNOWN,      //< unknown\n    AV_PICTURE_STRUCTURE_TOP_FIELD,    //< coded as top field\n    AV_PICTURE_STRUCTURE_BOTTOM_FIELD, //< coded as bottom field\n    AV_PICTURE_STRUCTURE_FRAME,        //< coded as frame\n};\n\ntypedef struct AVCodecParserContext {\n    void *priv_data;\n    struct AVCodecParser *parser;\n    int64_t frame_offset; /* offset of the current frame */\n    int64_t cur_offset; /* current offset\n                           (incremented by each av_parser_parse()) */\n    int64_t next_frame_offset; /* offset of the next frame */\n    /* video info */\n    int pict_type; /* XXX: Put it back in AVCodecContext. */\n    /**\n     * This field is used for proper frame duration computation in lavf.\n     * It signals, how much longer the frame duration of the current frame\n     * is compared to normal frame duration.\n     *\n     * frame_duration = (1 + repeat_pict) * time_base\n     *\n     * It is used by codecs like H.264 to display telecined material.\n     */\n    int repeat_pict; /* XXX: Put it back in AVCodecContext. 
*/\n    int64_t pts;     /* pts of the current frame */\n    int64_t dts;     /* dts of the current frame */\n\n    /* private data */\n    int64_t last_pts;\n    int64_t last_dts;\n    int fetch_timestamp;\n\n#define AV_PARSER_PTS_NB 4\n    int cur_frame_start_index;\n    int64_t cur_frame_offset[AV_PARSER_PTS_NB];\n    int64_t cur_frame_pts[AV_PARSER_PTS_NB];\n    int64_t cur_frame_dts[AV_PARSER_PTS_NB];\n\n    int flags;\n#define PARSER_FLAG_COMPLETE_FRAMES           0x0001\n#define PARSER_FLAG_ONCE                      0x0002\n/// Set if the parser has a valid file offset\n#define PARSER_FLAG_FETCHED_OFFSET            0x0004\n#define PARSER_FLAG_USE_CODEC_TS              0x1000\n\n    int64_t offset;      ///< byte offset from starting packet start\n    int64_t cur_frame_end[AV_PARSER_PTS_NB];\n\n    /**\n     * Set by parser to 1 for key frames and 0 for non-key frames.\n     * It is initialized to -1, so if the parser doesn't set this flag,\n     * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames\n     * will be used.\n     */\n    int key_frame;\n\n    /**\n     * Time difference in stream time base units from the pts of this\n     * packet to the point at which the output from the decoder has converged\n     * independent from the availability of previous frames. That is, the\n     * frames are virtually identical no matter if decoding started from\n     * the very first frame or from this keyframe.\n     * Is AV_NOPTS_VALUE if unknown.\n     * This field is not the display duration of the current frame.\n     * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY\n     * set.\n     *\n     * The purpose of this field is to allow seeking in streams that have no\n     * keyframes in the conventional sense. It corresponds to the\n     * recovery point SEI in H.264 and match_time_delta in NUT. 
It is also\n     * essential for some types of subtitle streams to ensure that all\n     * subtitles are correctly displayed after seeking.\n     */\n    int64_t convergence_duration;\n\n    // Timestamp generation support:\n    /**\n     * Synchronization point for start of timestamp generation.\n     *\n     * Set to >0 for sync point, 0 for no sync point and <0 for undefined\n     * (default).\n     *\n     * For example, this corresponds to presence of H.264 buffering period\n     * SEI message.\n     */\n    int dts_sync_point;\n\n    /**\n     * Offset of the current timestamp against last timestamp sync point in\n     * units of AVCodecContext.time_base.\n     *\n     * Set to INT_MIN when dts_sync_point unused. Otherwise, it must\n     * contain a valid timestamp offset.\n     *\n     * Note that the timestamp of sync point has usually a nonzero\n     * dts_ref_dts_delta, which refers to the previous sync point. Offset of\n     * the next frame after timestamp sync point will be usually 1.\n     *\n     * For example, this corresponds to H.264 cpb_removal_delay.\n     */\n    int dts_ref_dts_delta;\n\n    /**\n     * Presentation delay of current frame in units of AVCodecContext.time_base.\n     *\n     * Set to INT_MIN when dts_sync_point unused. 
Otherwise, it must\n     * contain valid non-negative timestamp delta (presentation time of a frame\n     * must not lie in the past).\n     *\n     * This delay represents the difference between decoding and presentation\n     * time of the frame.\n     *\n     * For example, this corresponds to H.264 dpb_output_delay.\n     */\n    int pts_dts_delta;\n\n    /**\n     * Position of the packet in file.\n     *\n     * Analogous to cur_frame_pts/dts\n     */\n    int64_t cur_frame_pos[AV_PARSER_PTS_NB];\n\n    /**\n     * Byte position of currently parsed frame in stream.\n     */\n    int64_t pos;\n\n    /**\n     * Previous frame byte position.\n     */\n    int64_t last_pos;\n\n    /**\n     * Duration of the current frame.\n     * For audio, this is in units of 1 / AVCodecContext.sample_rate.\n     * For all other types, this is in units of AVCodecContext.time_base.\n     */\n    int duration;\n\n    enum AVFieldOrder field_order;\n\n    /**\n     * Indicate whether a picture is coded as a frame, top field or bottom field.\n     *\n     * For example, H.264 field_pic_flag equal to 0 corresponds to\n     * AV_PICTURE_STRUCTURE_FRAME. 
An H.264 picture with field_pic_flag\n     * equal to 1 and bottom_field_flag equal to 0 corresponds to\n     * AV_PICTURE_STRUCTURE_TOP_FIELD.\n     */\n    enum AVPictureStructure picture_structure;\n\n    /**\n     * Picture number incremented in presentation or output order.\n     * This field may be reinitialized at the first picture of a new sequence.\n     *\n     * For example, this corresponds to H.264 PicOrderCnt.\n     */\n    int output_picture_number;\n\n    /**\n     * Dimensions of the decoded video intended for presentation.\n     */\n    int width;\n    int height;\n\n    /**\n     * Dimensions of the coded video.\n     */\n    int coded_width;\n    int coded_height;\n\n    /**\n     * The format of the coded data, corresponds to enum AVPixelFormat for video\n     * and for enum AVSampleFormat for audio.\n     *\n     * Note that a decoder can have considerable freedom in how exactly it\n     * decodes the data, so the format reported here might be different from the\n     * one returned by a decoder.\n     */\n    int format;\n} AVCodecParserContext;\n\ntypedef struct AVCodecParser {\n    int codec_ids[5]; /* several codec IDs are permitted */\n    int priv_data_size;\n    int (*parser_init)(AVCodecParserContext *s);\n    /* This callback never returns an error, a negative value means that\n     * the frame start was in a previous packet. 
*/\n    int (*parser_parse)(AVCodecParserContext *s,\n                        AVCodecContext *avctx,\n                        const uint8_t **poutbuf, int *poutbuf_size,\n                        const uint8_t *buf, int buf_size);\n    void (*parser_close)(AVCodecParserContext *s);\n    int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size);\n    struct AVCodecParser *next;\n} AVCodecParser;\n\nAVCodecParser *av_parser_next(const AVCodecParser *c);\n\nvoid av_register_codec_parser(AVCodecParser *parser);\nAVCodecParserContext *av_parser_init(int codec_id);\n\n/**\n * Parse a packet.\n *\n * @param s             parser context.\n * @param avctx         codec context.\n * @param poutbuf       set to pointer to parsed buffer or NULL if not yet finished.\n * @param poutbuf_size  set to size of parsed buffer or zero if not yet finished.\n * @param buf           input buffer.\n * @param buf_size      input length, to signal EOF, this should be 0 (so that the last frame can be output).\n * @param pts           input presentation timestamp.\n * @param dts           input decoding timestamp.\n * @param pos           input byte position in stream.\n * @return the number of bytes of the input bitstream used.\n *\n * Example:\n * @code\n *   while(in_len){\n *       len = av_parser_parse2(myparser, AVCodecContext, &data, &size,\n *                                        in_data, in_len,\n *                                        pts, dts, pos);\n *       in_data += len;\n *       in_len  -= len;\n *\n *       if(size)\n *          decode_frame(data, size);\n *   }\n * @endcode\n */\nint av_parser_parse2(AVCodecParserContext *s,\n                     AVCodecContext *avctx,\n                     uint8_t **poutbuf, int *poutbuf_size,\n                     const uint8_t *buf, int buf_size,\n                     int64_t pts, int64_t dts,\n                     int64_t pos);\n\n/**\n * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and 
must be freed\n * @deprecated use AVBitStreamFilter\n */\nint av_parser_change(AVCodecParserContext *s,\n                     AVCodecContext *avctx,\n                     uint8_t **poutbuf, int *poutbuf_size,\n                     const uint8_t *buf, int buf_size, int keyframe);\nvoid av_parser_close(AVCodecParserContext *s);\n\n/**\n * @}\n * @}\n */\n\n/**\n * @addtogroup lavc_encoding\n * @{\n */\n\n/**\n * Find a registered encoder with a matching codec ID.\n *\n * @param id AVCodecID of the requested encoder\n * @return An encoder if one was found, NULL otherwise.\n */\nAVCodec *avcodec_find_encoder(enum AVCodecID id);\n\n/**\n * Find a registered encoder with the specified name.\n *\n * @param name name of the requested encoder\n * @return An encoder if one was found, NULL otherwise.\n */\nAVCodec *avcodec_find_encoder_by_name(const char *name);\n\n#if FF_API_OLD_ENCODE_AUDIO\n/**\n * Encode an audio frame from samples into buf.\n *\n * @deprecated Use avcodec_encode_audio2 instead.\n *\n * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large.\n * However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user\n * will know how much space is needed because it depends on the value passed\n * in buf_size as described below. In that case a lower value can be used.\n *\n * @param avctx the codec context\n * @param[out] buf the output buffer\n * @param[in] buf_size the output buffer size\n * @param[in] samples the input buffer containing the samples\n * The number of samples read from this buffer is frame_size*channels,\n * both of which are defined in avctx.\n * For codecs which have avctx->frame_size equal to 0 (e.g. 
PCM) the number of\n * samples read from samples is equal to:\n * buf_size * 8 / (avctx->channels * av_get_bits_per_sample(avctx->codec_id))\n * This also implies that av_get_bits_per_sample() must not return 0 for these\n * codecs.\n * @return On error a negative value is returned, on success zero or the number\n * of bytes used to encode the data read from the input buffer.\n */\nint attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx,\n                                              uint8_t *buf, int buf_size,\n                                              const short *samples);\n#endif\n\n/**\n * Encode a frame of audio.\n *\n * Takes input samples from frame and writes the next output packet, if\n * available, to avpkt. The output packet does not necessarily contain data for\n * the most recent frame, as encoders can delay, split, and combine input frames\n * internally as needed.\n *\n * @param avctx     codec context\n * @param avpkt     output AVPacket.\n *                  The user can supply an output buffer by setting\n *                  avpkt->data and avpkt->size prior to calling the\n *                  function, but if the size of the user-provided data is not\n *                  large enough, encoding will fail. If avpkt->data and\n *                  avpkt->size are set, avpkt->destruct must also be set. All\n *                  other AVPacket fields will be reset by the encoder using\n *                  av_init_packet(). If avpkt->data is NULL, the encoder will\n *                  allocate it. The encoder will set avpkt->size to the size\n *                  of the output packet.\n *\n *                  If this function fails or produces no output, avpkt will be\n *                  freed using av_free_packet() (i.e. 
avpkt->destruct will be\n *                  called to free the user supplied buffer).\n * @param[in] frame AVFrame containing the raw audio data to be encoded.\n *                  May be NULL when flushing an encoder that has the\n *                  CODEC_CAP_DELAY capability set.\n *                  If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame\n *                  can have any number of samples.\n *                  If it is not set, frame->nb_samples must be equal to\n *                  avctx->frame_size for all frames except the last.\n *                  The final frame may be smaller than avctx->frame_size.\n * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the\n *                            output packet is non-empty, and to 0 if it is\n *                            empty. If the function returns an error, the\n *                            packet can be assumed to be invalid, and the\n *                            value of got_packet_ptr is undefined and should\n *                            not be used.\n * @return          0 on success, negative error code on failure\n */\nint avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt,\n                          const AVFrame *frame, int *got_packet_ptr);\n\n#if FF_API_OLD_ENCODE_VIDEO\n/**\n * @deprecated use avcodec_encode_video2() instead.\n *\n * Encode a video frame from pict into buf.\n * The input picture should be\n * stored using a specific format, namely avctx.pix_fmt.\n *\n * @param avctx the codec context\n * @param[out] buf the output buffer for the bitstream of encoded frame\n * @param[in] buf_size the size of the output buffer in bytes\n * @param[in] pict the input picture to encode\n * @return On error a negative value is returned, on success zero or the number\n * of bytes used from the output buffer.\n */\nattribute_deprecated\nint avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,\n                         const AVFrame 
*pict);\n#endif\n\n/**\n * Encode a frame of video.\n *\n * Takes input raw video data from frame and writes the next output packet, if\n * available, to avpkt. The output packet does not necessarily contain data for\n * the most recent frame, as encoders can delay and reorder input frames\n * internally as needed.\n *\n * @param avctx     codec context\n * @param avpkt     output AVPacket.\n *                  The user can supply an output buffer by setting\n *                  avpkt->data and avpkt->size prior to calling the\n *                  function, but if the size of the user-provided data is not\n *                  large enough, encoding will fail. All other AVPacket fields\n *                  will be reset by the encoder using av_init_packet(). If\n *                  avpkt->data is NULL, the encoder will allocate it.\n *                  The encoder will set avpkt->size to the size of the\n *                  output packet. The returned data (if any) belongs to the\n *                  caller, he is responsible for freeing it.\n *\n *                  If this function fails or produces no output, avpkt will be\n *                  freed using av_free_packet() (i.e. avpkt->destruct will be\n *                  called to free the user supplied buffer).\n * @param[in] frame AVFrame containing the raw video data to be encoded.\n *                  May be NULL when flushing an encoder that has the\n *                  CODEC_CAP_DELAY capability set.\n * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the\n *                            output packet is non-empty, and to 0 if it is\n *                            empty. 
If the function returns an error, the\n *                            packet can be assumed to be invalid, and the\n *                            value of got_packet_ptr is undefined and should\n *                            not be used.\n * @return          0 on success, negative error code on failure\n */\nint avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt,\n                          const AVFrame *frame, int *got_packet_ptr);\n\nint avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,\n                            const AVSubtitle *sub);\n\n\n/**\n * @}\n */\n\n#if FF_API_AVCODEC_RESAMPLE\n/**\n * @defgroup lavc_resample Audio resampling\n * @ingroup libavc\n * @deprecated use libswresample instead\n *\n * @{\n */\nstruct ReSampleContext;\nstruct AVResampleContext;\n\ntypedef struct ReSampleContext ReSampleContext;\n\n/**\n *  Initialize audio resampling context.\n *\n * @param output_channels  number of output channels\n * @param input_channels   number of input channels\n * @param output_rate      output sample rate\n * @param input_rate       input sample rate\n * @param sample_fmt_out   requested output sample format\n * @param sample_fmt_in    input sample format\n * @param filter_length    length of each FIR filter in the filterbank relative to the cutoff frequency\n * @param log2_phase_count log2 of the number of entries in the polyphase filterbank\n * @param linear           if 1 then the used FIR filter will be linearly interpolated\n                           between the 2 closest, if 0 the closest will be used\n * @param cutoff           cutoff frequency, 1.0 corresponds to half the output sampling rate\n * @return allocated ReSampleContext, NULL if error occurred\n */\nattribute_deprecated\nReSampleContext *av_audio_resample_init(int output_channels, int input_channels,\n                                        int output_rate, int input_rate,\n                                        enum AVSampleFormat 
sample_fmt_out,\n                                        enum AVSampleFormat sample_fmt_in,\n                                        int filter_length, int log2_phase_count,\n                                        int linear, double cutoff);\n\nattribute_deprecated\nint audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples);\n\n/**\n * Free resample context.\n *\n * @param s a non-NULL pointer to a resample context previously\n *          created with av_audio_resample_init()\n */\nattribute_deprecated\nvoid audio_resample_close(ReSampleContext *s);\n\n\n/**\n * Initialize an audio resampler.\n * Note, if either rate is not an integer then simply scale both rates up so they are.\n * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq\n * @param log2_phase_count log2 of the number of entries in the polyphase filterbank\n * @param linear If 1 then the used FIR filter will be linearly interpolated\n                 between the 2 closest, if 0 the closest will be used\n * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate\n */\nattribute_deprecated\nstruct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff);\n\n/**\n * Resample an array of samples using a previously configured context.\n * @param src an array of unconsumed samples\n * @param consumed the number of samples of src which have been consumed are returned here\n * @param src_size the number of unconsumed samples available\n * @param dst_size the amount of space in samples available in dst\n * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context.\n * @return the number of samples written in dst or -1 if an error occurred\n */\nattribute_deprecated\nint av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int 
dst_size, int update_ctx);\n\n\n/**\n * Compensate samplerate/timestamp drift. The compensation is done by changing\n * the resampler parameters, so no audible clicks or similar distortions occur\n * @param compensation_distance distance in output samples over which the compensation should be performed\n * @param sample_delta number of output samples which should be output less\n *\n * example: av_resample_compensate(c, 10, 500)\n * here instead of 510 samples only 500 samples would be output\n *\n * note, due to rounding the actual compensation might be slightly different,\n * especially if the compensation_distance is large and the in_rate used during init is small\n */\nattribute_deprecated\nvoid av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance);\nattribute_deprecated\nvoid av_resample_close(struct AVResampleContext *c);\n\n/**\n * @}\n */\n#endif\n\n/**\n * @addtogroup lavc_picture\n * @{\n */\n\n/**\n * Allocate memory for the pixels of a picture and setup the AVPicture\n * fields for it.\n *\n * Call avpicture_free() to free it.\n *\n * @param picture            the picture structure to be filled in\n * @param pix_fmt            the pixel format of the picture\n * @param width              the width of the picture\n * @param height             the height of the picture\n * @return zero if successful, a negative error code otherwise\n *\n * @see av_image_alloc(), avpicture_fill()\n */\nint avpicture_alloc(AVPicture *picture, enum AVPixelFormat pix_fmt, int width, int height);\n\n/**\n * Free a picture previously allocated by avpicture_alloc().\n * The data buffer used by the AVPicture is freed, but the AVPicture structure\n * itself is not.\n *\n * @param picture the AVPicture to be freed\n */\nvoid avpicture_free(AVPicture *picture);\n\n/**\n * Setup the picture fields based on the specified image parameters\n * and the provided image data buffer.\n *\n * The picture fields are filled in by using the image data 
buffer\n * pointed to by ptr.\n *\n * If ptr is NULL, the function will fill only the picture linesize\n * array and return the required size for the image buffer.\n *\n * To allocate an image buffer and fill the picture data in one call,\n * use avpicture_alloc().\n *\n * @param picture       the picture to be filled in\n * @param ptr           buffer where the image data is stored, or NULL\n * @param pix_fmt       the pixel format of the image\n * @param width         the width of the image in pixels\n * @param height        the height of the image in pixels\n * @return the size in bytes required for src, a negative error code\n * in case of failure\n *\n * @see av_image_fill_arrays()\n */\nint avpicture_fill(AVPicture *picture, const uint8_t *ptr,\n                   enum AVPixelFormat pix_fmt, int width, int height);\n\n/**\n * Copy pixel data from an AVPicture into a buffer.\n *\n * avpicture_get_size() can be used to compute the required size for\n * the buffer to fill.\n *\n * @param src        source picture with filled data\n * @param pix_fmt    picture pixel format\n * @param width      picture width\n * @param height     picture height\n * @param dest       destination buffer\n * @param dest_size  destination buffer size in bytes\n * @return the number of bytes written to dest, or a negative value\n * (error code) on error, for example if the destination buffer is not\n * big enough\n *\n * @see av_image_copy_to_buffer()\n */\nint avpicture_layout(const AVPicture *src, enum AVPixelFormat pix_fmt,\n                     int width, int height,\n                     unsigned char *dest, int dest_size);\n\n/**\n * Calculate the size in bytes that a picture of the given width and height\n * would occupy if stored in the given picture format.\n *\n * @param pix_fmt    picture pixel format\n * @param width      picture width\n * @param height     picture height\n * @return the computed picture buffer size or a negative error code\n * in case of error\n *\n * 
@see av_image_get_buffer_size().\n */\nint avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height);\n\n#if FF_API_DEINTERLACE\n/**\n *  deinterlace - if not supported return -1\n *\n * @deprecated - use yadif (in libavfilter) instead\n */\nattribute_deprecated\nint avpicture_deinterlace(AVPicture *dst, const AVPicture *src,\n                          enum AVPixelFormat pix_fmt, int width, int height);\n#endif\n/**\n * Copy image src to dst. Wraps av_image_copy().\n */\nvoid av_picture_copy(AVPicture *dst, const AVPicture *src,\n                     enum AVPixelFormat pix_fmt, int width, int height);\n\n/**\n * Crop image top and left side.\n */\nint av_picture_crop(AVPicture *dst, const AVPicture *src,\n                    enum AVPixelFormat pix_fmt, int top_band, int left_band);\n\n/**\n * Pad image.\n */\nint av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum AVPixelFormat pix_fmt,\n            int padtop, int padbottom, int padleft, int padright, int *color);\n\n/**\n * @}\n */\n\n/**\n * @defgroup lavc_misc Utility functions\n * @ingroup libavc\n *\n * Miscellaneous utility functions related to both encoding and decoding\n * (or neither).\n * @{\n */\n\n/**\n * @defgroup lavc_misc_pixfmt Pixel formats\n *\n * Functions for working with pixel formats.\n * @{\n */\n\n/**\n * Utility function to access log2_chroma_w log2_chroma_h from\n * the pixel format AVPixFmtDescriptor.\n *\n * This function asserts that pix_fmt is valid. 
See av_pix_fmt_get_chroma_sub_sample\n * for one that returns a failure code and continues in case of invalid\n * pix_fmts.\n *\n * @param[in]  pix_fmt the pixel format\n * @param[out] h_shift store log2_chroma_w\n * @param[out] v_shift store log2_chroma_h\n *\n * @see av_pix_fmt_get_chroma_sub_sample\n */\n\nvoid avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift);\n\n/**\n * Return a value representing the fourCC code associated to the\n * pixel format pix_fmt, or 0 if no associated fourCC code can be\n * found.\n */\nunsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt);\n\n/**\n * @deprecated see av_get_pix_fmt_loss()\n */\nint avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat src_pix_fmt,\n                             int has_alpha);\n\n/**\n * Find the best pixel format to convert to given a certain source pixel\n * format.  When converting from one pixel format to another, information loss\n * may occur.  For example, when converting from RGB24 to GRAY, the color\n * information will be lost. Similarly, other losses occur when converting from\n * some formats to other formats. 
avcodec_find_best_pix_fmt_of_2() searches which of\n * the given pixel formats should be used to suffer the least amount of loss.\n * The pixel formats from which it chooses one, are determined by the\n * pix_fmt_list parameter.\n *\n *\n * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to choose from\n * @param[in] src_pix_fmt source pixel format\n * @param[in] has_alpha Whether the source pixel format alpha channel is used.\n * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur.\n * @return The best pixel format to convert to or -1 if none was found.\n */\nenum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *pix_fmt_list,\n                                            enum AVPixelFormat src_pix_fmt,\n                                            int has_alpha, int *loss_ptr);\n\n/**\n * @deprecated see av_find_best_pix_fmt_of_2()\n */\nenum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,\n                                            enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);\n\nattribute_deprecated\n#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI\nenum AVPixelFormat avcodec_find_best_pix_fmt2(const enum AVPixelFormat *pix_fmt_list,\n                                              enum AVPixelFormat src_pix_fmt,\n                                              int has_alpha, int *loss_ptr);\n#else\nenum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,\n                                            enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);\n#endif\n\n\nenum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt);\n\n/**\n * @}\n */\n\n#if FF_API_SET_DIMENSIONS\n/**\n * @deprecated this function is not supposed to be used from outside of lavc\n */\nattribute_deprecated\nvoid 
avcodec_set_dimensions(AVCodecContext *s, int width, int height);\n#endif\n\n/**\n * Put a string representing the codec tag codec_tag in buf.\n *\n * @param buf       buffer to place codec tag in\n * @param buf_size size in bytes of buf\n * @param codec_tag codec tag to assign\n * @return the length of the string that would have been generated if\n * enough space had been available, excluding the trailing null\n */\nsize_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag);\n\nvoid avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode);\n\n/**\n * Return a name for the specified profile, if available.\n *\n * @param codec the codec that is searched for the given profile\n * @param profile the profile value for which a name is requested\n * @return A name for the profile if found, NULL otherwise.\n */\nconst char *av_get_profile_name(const AVCodec *codec, int profile);\n\nint avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);\nint avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count);\n//FIXME func typedef\n\n/**\n * Fill AVFrame audio data and linesize pointers.\n *\n * The buffer buf must be a preallocated buffer with a size big enough\n * to contain the specified samples amount. The filled AVFrame data\n * pointers will point to this buffer.\n *\n * AVFrame extended_data channel pointers are allocated if necessary for\n * planar audio.\n *\n * @param frame       the AVFrame\n *                    frame->nb_samples must be set prior to calling the\n *                    function. 
This function fills in frame->data,\n *                    frame->extended_data, frame->linesize[0].\n * @param nb_channels channel count\n * @param sample_fmt  sample format\n * @param buf         buffer to use for frame data\n * @param buf_size    size of buffer\n * @param align       plane size sample alignment (0 = default)\n * @return            >=0 on success, negative error code on failure\n * @todo return the size in bytes required to store the samples in\n * case of success, at the next libavutil bump\n */\nint avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,\n                             enum AVSampleFormat sample_fmt, const uint8_t *buf,\n                             int buf_size, int align);\n\n/**\n * Reset the internal decoder state / flush internal buffers. Should be called\n * e.g. when seeking or when switching to a different stream.\n *\n * @note when refcounted frames are not used (i.e. avctx->refcounted_frames is 0),\n * this invalidates the frames previously returned from the decoder. 
When\n * refcounted frames are used, the decoder just releases any references it might\n * keep internally, but the caller's reference remains valid.\n */\nvoid avcodec_flush_buffers(AVCodecContext *avctx);\n\n/**\n * Return codec bits per sample.\n *\n * @param[in] codec_id the codec\n * @return Number of bits per sample or zero if unknown for the given codec.\n */\nint av_get_bits_per_sample(enum AVCodecID codec_id);\n\n/**\n * Return the PCM codec associated with a sample format.\n * @param be  endianness, 0 for little, 1 for big,\n *            -1 (or anything else) for native\n * @return  AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE\n */\nenum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be);\n\n/**\n * Return codec bits per sample.\n * Only return non-zero if the bits per sample is exactly correct, not an\n * approximation.\n *\n * @param[in] codec_id the codec\n * @return Number of bits per sample or zero if unknown for the given codec.\n */\nint av_get_exact_bits_per_sample(enum AVCodecID codec_id);\n\n/**\n * Return audio frame duration.\n *\n * @param avctx        codec context\n * @param frame_bytes  size of the frame, or 0 if unknown\n * @return             frame duration, in samples, if known. 
0 if not able to\n *                     determine.\n */\nint av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes);\n\n\ntypedef struct AVBitStreamFilterContext {\n    void *priv_data;\n    struct AVBitStreamFilter *filter;\n    AVCodecParserContext *parser;\n    struct AVBitStreamFilterContext *next;\n} AVBitStreamFilterContext;\n\n\ntypedef struct AVBitStreamFilter {\n    const char *name;\n    int priv_data_size;\n    int (*filter)(AVBitStreamFilterContext *bsfc,\n                  AVCodecContext *avctx, const char *args,\n                  uint8_t **poutbuf, int *poutbuf_size,\n                  const uint8_t *buf, int buf_size, int keyframe);\n    void (*close)(AVBitStreamFilterContext *bsfc);\n    struct AVBitStreamFilter *next;\n} AVBitStreamFilter;\n\n/**\n * Register a bitstream filter.\n *\n * The filter will be accessible to the application code through\n * av_bitstream_filter_next() or can be directly initialized with\n * av_bitstream_filter_init().\n *\n * @see avcodec_register_all()\n */\nvoid av_register_bitstream_filter(AVBitStreamFilter *bsf);\n\n/**\n * Create and initialize a bitstream filter context given a bitstream\n * filter name.\n *\n * The returned context must be freed with av_bitstream_filter_close().\n *\n * @param name    the name of the bitstream filter\n * @return a bitstream filter context if a matching filter was found\n * and successfully initialized, NULL otherwise\n */\nAVBitStreamFilterContext *av_bitstream_filter_init(const char *name);\n\n/**\n * Filter bitstream.\n *\n * This function filters the buffer buf with size buf_size, and places the\n * filtered buffer in the buffer pointed to by poutbuf.\n *\n * The output buffer must be freed by the caller.\n *\n * @param bsfc            bitstream filter context created by av_bitstream_filter_init()\n * @param avctx           AVCodecContext accessed by the filter, may be NULL.\n *                        If specified, this must point to the encoder context of the\n * 
                       output stream the packet is sent to.\n * @param args            arguments which specify the filter configuration, may be NULL\n * @param poutbuf         pointer which is updated to point to the filtered buffer\n * @param poutbuf_size    pointer which is updated to the filtered buffer size in bytes\n * @param buf             buffer containing the data to filter\n * @param buf_size        size in bytes of buf\n * @param keyframe        set to non-zero if the buffer to filter corresponds to a key-frame packet data\n * @return >= 0 in case of success, or a negative error code in case of failure\n *\n * If the return value is positive, an output buffer is allocated and\n * is available in *poutbuf, and is distinct from the input buffer.\n *\n * If the return value is 0, the output buffer is not allocated and\n * should be considered identical to the input buffer, or in case\n * *poutbuf was set it points to the input buffer (not necessarily to\n * its starting address).\n */\nint av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc,\n                               AVCodecContext *avctx, const char *args,\n                               uint8_t **poutbuf, int *poutbuf_size,\n                               const uint8_t *buf, int buf_size, int keyframe);\n\n/**\n * Release bitstream filter context.\n *\n * @param bsf the bitstream filter context created with\n * av_bitstream_filter_init(), can be NULL\n */\nvoid av_bitstream_filter_close(AVBitStreamFilterContext *bsf);\n\n/**\n * If f is NULL, return the first registered bitstream filter,\n * if f is non-NULL, return the next registered bitstream filter\n * after f, or NULL if f is the last one.\n *\n * This function can be used to iterate over all registered bitstream\n * filters.\n */\nAVBitStreamFilter *av_bitstream_filter_next(const AVBitStreamFilter *f);\n\n/* memory */\n\n/**\n * Same behaviour av_fast_malloc but the buffer has additional\n * FF_INPUT_BUFFER_PADDING_SIZE at the end which 
will always be 0.\n *\n * In addition the whole buffer will initially and after resizes\n * be 0-initialized so that no uninitialized data will ever appear.\n */\nvoid av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size);\n\n/**\n * Same behaviour av_fast_padded_malloc except that buffer will always\n * be 0-initialized after call.\n */\nvoid av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size);\n\n/**\n * Encode extradata length to a buffer. Used by xiph codecs.\n *\n * @param s buffer to write to; must be at least (v/255+1) bytes long\n * @param v size of extradata in bytes\n * @return number of bytes written to the buffer.\n */\nunsigned int av_xiphlacing(unsigned char *s, unsigned int v);\n\n#if FF_API_MISSING_SAMPLE\n/**\n * Log a generic warning message about a missing feature. This function is\n * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)\n * only, and would normally not be used by applications.\n * @param[in] avc a pointer to an arbitrary struct of which the first field is\n * a pointer to an AVClass struct\n * @param[in] feature string containing the name of the missing feature\n * @param[in] want_sample indicates if samples are wanted which exhibit this feature.\n * If want_sample is non-zero, additional verbage will be added to the log\n * message which tells the user how to report samples to the development\n * mailing list.\n * @deprecated Use avpriv_report_missing_feature() instead.\n */\nattribute_deprecated\nvoid av_log_missing_feature(void *avc, const char *feature, int want_sample);\n\n/**\n * Log a generic warning message asking for a sample. 
This function is\n * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)\n * only, and would normally not be used by applications.\n * @param[in] avc a pointer to an arbitrary struct of which the first field is\n * a pointer to an AVClass struct\n * @param[in] msg string containing an optional message, or NULL if no message\n * @deprecated Use avpriv_request_sample() instead.\n */\nattribute_deprecated\nvoid av_log_ask_for_sample(void *avc, const char *msg, ...) av_printf_format(2, 3);\n#endif /* FF_API_MISSING_SAMPLE */\n\n/**\n * Register the hardware accelerator hwaccel.\n */\nvoid av_register_hwaccel(AVHWAccel *hwaccel);\n\n/**\n * If hwaccel is NULL, returns the first registered hardware accelerator,\n * if hwaccel is non-NULL, returns the next registered hardware accelerator\n * after hwaccel, or NULL if hwaccel is the last one.\n */\nAVHWAccel *av_hwaccel_next(const AVHWAccel *hwaccel);\n\n\n/**\n * Lock operation used by lockmgr\n */\nenum AVLockOp {\n  AV_LOCK_CREATE,  ///< Create a mutex\n  AV_LOCK_OBTAIN,  ///< Lock the mutex\n  AV_LOCK_RELEASE, ///< Unlock the mutex\n  AV_LOCK_DESTROY, ///< Free mutex resources\n};\n\n/**\n * Register a user provided lock manager supporting the operations\n * specified by AVLockOp. The \"mutex\" argument to the function points\n * to a (void *) where the lockmgr should store/get a pointer to a user\n * allocated mutex. It is NULL upon AV_LOCK_CREATE and equal to the\n * value left by the last call for all other ops. If the lock manager is\n * unable to perform the op then it should leave the mutex in the same\n * state as when it was called and return a non-zero value. However,\n * when called with AV_LOCK_DESTROY the mutex will always be assumed to\n * have been successfully destroyed. 
If av_lockmgr_register succeeds\n * it will return a non-negative value, if it fails it will return a\n * negative value and destroy all mutex and unregister all callbacks.\n * av_lockmgr_register is not thread-safe, it must be called from a\n * single thread before any calls which make use of locking are used.\n *\n * @param cb User defined callback. av_lockmgr_register invokes calls\n *           to this callback and the previously registered callback.\n *           The callback will be used to create more than one mutex\n *           each of which must be backed by its own underlying locking\n *           mechanism (i.e. do not use a single static object to\n *           implement your lock manager). If cb is set to NULL the\n *           lockmgr will be unregistered.\n */\nint av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op));\n\n/**\n * Get the type of the given codec.\n */\nenum AVMediaType avcodec_get_type(enum AVCodecID codec_id);\n\n/**\n * Get the name of a codec.\n * @return  a static string identifying the codec; never NULL\n */\nconst char *avcodec_get_name(enum AVCodecID id);\n\n/**\n * @return a positive value if s is open (i.e. avcodec_open2() was called on it\n * with no corresponding avcodec_close()), 0 otherwise.\n */\nint avcodec_is_open(AVCodecContext *s);\n\n/**\n * @return a non-zero number if codec is an encoder, zero otherwise\n */\nint av_codec_is_encoder(const AVCodec *codec);\n\n/**\n * @return a non-zero number if codec is a decoder, zero otherwise\n */\nint av_codec_is_decoder(const AVCodec *codec);\n\n/**\n * @return descriptor for given codec ID or NULL if no descriptor exists.\n */\nconst AVCodecDescriptor *avcodec_descriptor_get(enum AVCodecID id);\n\n/**\n * Iterate over all codec descriptors known to libavcodec.\n *\n * @param prev previous descriptor. 
NULL to get the first descriptor.\n *\n * @return next descriptor or NULL after the last descriptor\n */\nconst AVCodecDescriptor *avcodec_descriptor_next(const AVCodecDescriptor *prev);\n\n/**\n * @return codec descriptor with the given name or NULL if no such descriptor\n *         exists.\n */\nconst AVCodecDescriptor *avcodec_descriptor_get_by_name(const char *name);\n\n/**\n * @}\n */\n\n#endif /* AVCODEC_AVCODEC_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavcodec/avfft.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_AVFFT_H\n#define AVCODEC_AVFFT_H\n\n/**\n * @file\n * @ingroup lavc_fft\n * FFT functions\n */\n\n/**\n * @defgroup lavc_fft FFT functions\n * @ingroup lavc_misc\n *\n * @{\n */\n\ntypedef float FFTSample;\n\ntypedef struct FFTComplex {\n    FFTSample re, im;\n} FFTComplex;\n\ntypedef struct FFTContext FFTContext;\n\n/**\n * Set up a complex FFT.\n * @param nbits           log2 of the length of the input array\n * @param inverse         if 0 perform the forward transform, if 1 perform the inverse\n */\nFFTContext *av_fft_init(int nbits, int inverse);\n\n/**\n * Do the permutation needed BEFORE calling ff_fft_calc().\n */\nvoid av_fft_permute(FFTContext *s, FFTComplex *z);\n\n/**\n * Do a complex FFT with the parameters defined in av_fft_init(). The\n * input data must be permuted before. 
No 1.0/sqrt(n) normalization is done.\n */\nvoid av_fft_calc(FFTContext *s, FFTComplex *z);\n\nvoid av_fft_end(FFTContext *s);\n\nFFTContext *av_mdct_init(int nbits, int inverse, double scale);\nvoid av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);\nvoid av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input);\nvoid av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);\nvoid av_mdct_end(FFTContext *s);\n\n/* Real Discrete Fourier Transform */\n\nenum RDFTransformType {\n    DFT_R2C,\n    IDFT_C2R,\n    IDFT_R2C,\n    DFT_C2R,\n};\n\ntypedef struct RDFTContext RDFTContext;\n\n/**\n * Set up a real FFT.\n * @param nbits           log2 of the length of the input array\n * @param trans           the type of transform\n */\nRDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans);\nvoid av_rdft_calc(RDFTContext *s, FFTSample *data);\nvoid av_rdft_end(RDFTContext *s);\n\n/* Discrete Cosine Transform */\n\ntypedef struct DCTContext DCTContext;\n\nenum DCTTransformType {\n    DCT_II = 0,\n    DCT_III,\n    DCT_I,\n    DST_I,\n};\n\n/**\n * Set up DCT.\n *\n * @param nbits           size of the input array:\n *                        (1 << nbits)     for DCT-II, DCT-III and DST-I\n *                        (1 << nbits) + 1 for DCT-I\n * @param type            the type of transform\n *\n * @note the first element of the input of DST-I is ignored\n */\nDCTContext *av_dct_init(int nbits, enum DCTTransformType type);\nvoid av_dct_calc(DCTContext *s, FFTSample *data);\nvoid av_dct_end (DCTContext *s);\n\n/**\n * @}\n */\n\n#endif /* AVCODEC_AVFFT_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavcodec/dv_profile.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_DV_PROFILE_H\n#define AVCODEC_DV_PROFILE_H\n\n#include <stdint.h>\n\n#include \"libavutil/pixfmt.h\"\n#include \"libavutil/rational.h\"\n#include \"avcodec.h\"\n\n/* minimum number of bytes to read from a DV stream in order to\n * determine the profile */\n#define DV_PROFILE_BYTES (6 * 80) /* 6 DIF blocks */\n\n\n/*\n * AVDVProfile is used to express the differences between various\n * DV flavors. For now it's primarily used for differentiating\n * 525/60 and 625/50, but the plans are to use it for various\n * DV specs as well (e.g. SMPTE314M vs. 
IEC 61834).\n */\ntypedef struct AVDVProfile {\n    int              dsf;                   /* value of the dsf in the DV header */\n    int              video_stype;           /* stype for VAUX source pack */\n    int              frame_size;            /* total size of one frame in bytes */\n    int              difseg_size;           /* number of DIF segments per DIF channel */\n    int              n_difchan;             /* number of DIF channels per frame */\n    AVRational       time_base;             /* 1/framerate */\n    int              ltc_divisor;           /* FPS from the LTS standpoint */\n    int              height;                /* picture height in pixels */\n    int              width;                 /* picture width in pixels */\n    AVRational       sar[2];                /* sample aspect ratios for 4:3 and 16:9 */\n    enum AVPixelFormat pix_fmt;             /* picture pixel format */\n    int              bpm;                   /* blocks per macroblock */\n    const uint8_t   *block_sizes;           /* AC block sizes, in bits */\n    int              audio_stride;          /* size of audio_shuffle table */\n    int              audio_min_samples[3];  /* min amount of audio samples */\n                                            /* for 48kHz, 44.1kHz and 32kHz */\n    int              audio_samples_dist[5]; /* how many samples are supposed to be */\n                                            /* in each frame in a 5 frames window */\n    const uint8_t  (*audio_shuffle)[9];     /* PCM shuffling table */\n} AVDVProfile;\n\n#if FF_API_DV_FRAME_PROFILE\n/**\n * @deprecated use av_dv_frame_profile()\n */\nattribute_deprecated\nconst AVDVProfile* avpriv_dv_frame_profile2(AVCodecContext* codec, const AVDVProfile *sys,\n                                            const uint8_t* frame, unsigned buf_size);\n#endif\n\n/**\n * Get a DV profile for the provided compressed frame.\n *\n * @param sys the profile used for the previous frame, may be NULL\n * 
@param frame the compressed data buffer\n * @param buf_size size of the buffer in bytes\n * @return the DV profile for the supplied data or NULL on failure\n */\nconst AVDVProfile *av_dv_frame_profile(const AVDVProfile *sys,\n                                       const uint8_t *frame, unsigned buf_size);\n\n/**\n * Get a DV profile for the provided stream parameters.\n */\nconst AVDVProfile *av_dv_codec_profile(int width, int height, enum AVPixelFormat pix_fmt);\n\n/**\n * Get a DV profile for the provided stream parameters.\n * The frame rate is used as a best-effort parameter.\n */\nconst AVDVProfile *av_dv_codec_profile2(int width, int height, enum AVPixelFormat pix_fmt, AVRational frame_rate);\n\n#endif /* AVCODEC_DV_PROFILE_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavcodec/dxva2.h",
    "content": "/*\n * DXVA2 HW acceleration\n *\n * copyright (c) 2009 Laurent Aimar\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_DXVA_H\n#define AVCODEC_DXVA_H\n\n/**\n * @file\n * @ingroup lavc_codec_hwaccel_dxva2\n * Public libavcodec DXVA2 header.\n */\n\n#if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600\n#undef _WIN32_WINNT\n#define _WIN32_WINNT 0x0600\n#endif\n\n#include <stdint.h>\n#include <d3d9.h>\n#include <dxva2api.h>\n\n/**\n * @defgroup lavc_codec_hwaccel_dxva2 DXVA2\n * @ingroup lavc_codec_hwaccel\n *\n * @{\n */\n\n#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for DXVA2 and old UVD/UVD+ ATI video cards\n#define FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO    2 ///< Work around for DXVA2 and old Intel GPUs with ClearVideo interface\n\n/**\n * This structure is used to provides the necessary configurations and data\n * to the DXVA2 FFmpeg HWAccel implementation.\n *\n * The application must make it available as AVCodecContext.hwaccel_context.\n */\nstruct dxva_context {\n    /**\n     * DXVA2 decoder object\n     */\n    IDirectXVideoDecoder *decoder;\n\n    /**\n     * DXVA2 configuration used to create the decoder\n     */\n    const DXVA2_ConfigPictureDecode *cfg;\n\n    /**\n     * The number 
of surface in the surface array\n     */\n    unsigned surface_count;\n\n    /**\n     * The array of Direct3D surfaces used to create the decoder\n     */\n    LPDIRECT3DSURFACE9 *surface;\n\n    /**\n     * A bit field configuring the workarounds needed for using the decoder\n     */\n    uint64_t workaround;\n\n    /**\n     * Private to the FFmpeg AVHWAccel implementation\n     */\n    unsigned report_id;\n};\n\n/**\n * @}\n */\n\n#endif /* AVCODEC_DXVA_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavcodec/old_codec_ids.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_OLD_CODEC_IDS_H\n#define AVCODEC_OLD_CODEC_IDS_H\n\n/*\n * This header exists to prevent new codec IDs from being accidentally added to\n * the deprecated list.\n * Do not include it directly. It will be removed on next major bump\n *\n * Do not add new items to this list. 
Use the AVCodecID enum instead.\n */\n\n    CODEC_ID_NONE = AV_CODEC_ID_NONE,\n\n    /* video codecs */\n    CODEC_ID_MPEG1VIDEO,\n    CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding\n#if FF_API_XVMC\n    CODEC_ID_MPEG2VIDEO_XVMC,\n#endif\n    CODEC_ID_H261,\n    CODEC_ID_H263,\n    CODEC_ID_RV10,\n    CODEC_ID_RV20,\n    CODEC_ID_MJPEG,\n    CODEC_ID_MJPEGB,\n    CODEC_ID_LJPEG,\n    CODEC_ID_SP5X,\n    CODEC_ID_JPEGLS,\n    CODEC_ID_MPEG4,\n    CODEC_ID_RAWVIDEO,\n    CODEC_ID_MSMPEG4V1,\n    CODEC_ID_MSMPEG4V2,\n    CODEC_ID_MSMPEG4V3,\n    CODEC_ID_WMV1,\n    CODEC_ID_WMV2,\n    CODEC_ID_H263P,\n    CODEC_ID_H263I,\n    CODEC_ID_FLV1,\n    CODEC_ID_SVQ1,\n    CODEC_ID_SVQ3,\n    CODEC_ID_DVVIDEO,\n    CODEC_ID_HUFFYUV,\n    CODEC_ID_CYUV,\n    CODEC_ID_H264,\n    CODEC_ID_INDEO3,\n    CODEC_ID_VP3,\n    CODEC_ID_THEORA,\n    CODEC_ID_ASV1,\n    CODEC_ID_ASV2,\n    CODEC_ID_FFV1,\n    CODEC_ID_4XM,\n    CODEC_ID_VCR1,\n    CODEC_ID_CLJR,\n    CODEC_ID_MDEC,\n    CODEC_ID_ROQ,\n    CODEC_ID_INTERPLAY_VIDEO,\n    CODEC_ID_XAN_WC3,\n    CODEC_ID_XAN_WC4,\n    CODEC_ID_RPZA,\n    CODEC_ID_CINEPAK,\n    CODEC_ID_WS_VQA,\n    CODEC_ID_MSRLE,\n    CODEC_ID_MSVIDEO1,\n    CODEC_ID_IDCIN,\n    CODEC_ID_8BPS,\n    CODEC_ID_SMC,\n    CODEC_ID_FLIC,\n    CODEC_ID_TRUEMOTION1,\n    CODEC_ID_VMDVIDEO,\n    CODEC_ID_MSZH,\n    CODEC_ID_ZLIB,\n    CODEC_ID_QTRLE,\n    CODEC_ID_TSCC,\n    CODEC_ID_ULTI,\n    CODEC_ID_QDRAW,\n    CODEC_ID_VIXL,\n    CODEC_ID_QPEG,\n    CODEC_ID_PNG,\n    CODEC_ID_PPM,\n    CODEC_ID_PBM,\n    CODEC_ID_PGM,\n    CODEC_ID_PGMYUV,\n    CODEC_ID_PAM,\n    CODEC_ID_FFVHUFF,\n    CODEC_ID_RV30,\n    CODEC_ID_RV40,\n    CODEC_ID_VC1,\n    CODEC_ID_WMV3,\n    CODEC_ID_LOCO,\n    CODEC_ID_WNV1,\n    CODEC_ID_AASC,\n    CODEC_ID_INDEO2,\n    CODEC_ID_FRAPS,\n    CODEC_ID_TRUEMOTION2,\n    CODEC_ID_BMP,\n    CODEC_ID_CSCD,\n    CODEC_ID_MMVIDEO,\n    CODEC_ID_ZMBV,\n    CODEC_ID_AVS,\n    CODEC_ID_SMACKVIDEO,\n    CODEC_ID_NUV,\n    
CODEC_ID_KMVC,\n    CODEC_ID_FLASHSV,\n    CODEC_ID_CAVS,\n    CODEC_ID_JPEG2000,\n    CODEC_ID_VMNC,\n    CODEC_ID_VP5,\n    CODEC_ID_VP6,\n    CODEC_ID_VP6F,\n    CODEC_ID_TARGA,\n    CODEC_ID_DSICINVIDEO,\n    CODEC_ID_TIERTEXSEQVIDEO,\n    CODEC_ID_TIFF,\n    CODEC_ID_GIF,\n    CODEC_ID_DXA,\n    CODEC_ID_DNXHD,\n    CODEC_ID_THP,\n    CODEC_ID_SGI,\n    CODEC_ID_C93,\n    CODEC_ID_BETHSOFTVID,\n    CODEC_ID_PTX,\n    CODEC_ID_TXD,\n    CODEC_ID_VP6A,\n    CODEC_ID_AMV,\n    CODEC_ID_VB,\n    CODEC_ID_PCX,\n    CODEC_ID_SUNRAST,\n    CODEC_ID_INDEO4,\n    CODEC_ID_INDEO5,\n    CODEC_ID_MIMIC,\n    CODEC_ID_RL2,\n    CODEC_ID_ESCAPE124,\n    CODEC_ID_DIRAC,\n    CODEC_ID_BFI,\n    CODEC_ID_CMV,\n    CODEC_ID_MOTIONPIXELS,\n    CODEC_ID_TGV,\n    CODEC_ID_TGQ,\n    CODEC_ID_TQI,\n    CODEC_ID_AURA,\n    CODEC_ID_AURA2,\n    CODEC_ID_V210X,\n    CODEC_ID_TMV,\n    CODEC_ID_V210,\n    CODEC_ID_DPX,\n    CODEC_ID_MAD,\n    CODEC_ID_FRWU,\n    CODEC_ID_FLASHSV2,\n    CODEC_ID_CDGRAPHICS,\n    CODEC_ID_R210,\n    CODEC_ID_ANM,\n    CODEC_ID_BINKVIDEO,\n    CODEC_ID_IFF_ILBM,\n    CODEC_ID_IFF_BYTERUN1,\n    CODEC_ID_KGV1,\n    CODEC_ID_YOP,\n    CODEC_ID_VP8,\n    CODEC_ID_PICTOR,\n    CODEC_ID_ANSI,\n    CODEC_ID_A64_MULTI,\n    CODEC_ID_A64_MULTI5,\n    CODEC_ID_R10K,\n    CODEC_ID_MXPEG,\n    CODEC_ID_LAGARITH,\n    CODEC_ID_PRORES,\n    CODEC_ID_JV,\n    CODEC_ID_DFA,\n    CODEC_ID_WMV3IMAGE,\n    CODEC_ID_VC1IMAGE,\n    CODEC_ID_UTVIDEO,\n    CODEC_ID_BMV_VIDEO,\n    CODEC_ID_VBLE,\n    CODEC_ID_DXTORY,\n    CODEC_ID_V410,\n    CODEC_ID_XWD,\n    CODEC_ID_CDXL,\n    CODEC_ID_XBM,\n    CODEC_ID_ZEROCODEC,\n    CODEC_ID_MSS1,\n    CODEC_ID_MSA1,\n    CODEC_ID_TSCC2,\n    CODEC_ID_MTS2,\n    CODEC_ID_CLLC,\n    CODEC_ID_Y41P       = MKBETAG('Y','4','1','P'),\n    CODEC_ID_ESCAPE130  = MKBETAG('E','1','3','0'),\n    CODEC_ID_EXR        = MKBETAG('0','E','X','R'),\n    CODEC_ID_AVRP       = MKBETAG('A','V','R','P'),\n\n    CODEC_ID_G2M        = MKBETAG( 0 
,'G','2','M'),\n    CODEC_ID_AVUI       = MKBETAG('A','V','U','I'),\n    CODEC_ID_AYUV       = MKBETAG('A','Y','U','V'),\n    CODEC_ID_V308       = MKBETAG('V','3','0','8'),\n    CODEC_ID_V408       = MKBETAG('V','4','0','8'),\n    CODEC_ID_YUV4       = MKBETAG('Y','U','V','4'),\n    CODEC_ID_SANM       = MKBETAG('S','A','N','M'),\n    CODEC_ID_PAF_VIDEO  = MKBETAG('P','A','F','V'),\n    CODEC_ID_SNOW       = AV_CODEC_ID_SNOW,\n\n    /* various PCM \"codecs\" */\n    CODEC_ID_FIRST_AUDIO = 0x10000,     ///< A dummy id pointing at the start of audio codecs\n    CODEC_ID_PCM_S16LE = 0x10000,\n    CODEC_ID_PCM_S16BE,\n    CODEC_ID_PCM_U16LE,\n    CODEC_ID_PCM_U16BE,\n    CODEC_ID_PCM_S8,\n    CODEC_ID_PCM_U8,\n    CODEC_ID_PCM_MULAW,\n    CODEC_ID_PCM_ALAW,\n    CODEC_ID_PCM_S32LE,\n    CODEC_ID_PCM_S32BE,\n    CODEC_ID_PCM_U32LE,\n    CODEC_ID_PCM_U32BE,\n    CODEC_ID_PCM_S24LE,\n    CODEC_ID_PCM_S24BE,\n    CODEC_ID_PCM_U24LE,\n    CODEC_ID_PCM_U24BE,\n    CODEC_ID_PCM_S24DAUD,\n    CODEC_ID_PCM_ZORK,\n    CODEC_ID_PCM_S16LE_PLANAR,\n    CODEC_ID_PCM_DVD,\n    CODEC_ID_PCM_F32BE,\n    CODEC_ID_PCM_F32LE,\n    CODEC_ID_PCM_F64BE,\n    CODEC_ID_PCM_F64LE,\n    CODEC_ID_PCM_BLURAY,\n    CODEC_ID_PCM_LXF,\n    CODEC_ID_S302M,\n    CODEC_ID_PCM_S8_PLANAR,\n\n    /* various ADPCM codecs */\n    CODEC_ID_ADPCM_IMA_QT = 0x11000,\n    CODEC_ID_ADPCM_IMA_WAV,\n    CODEC_ID_ADPCM_IMA_DK3,\n    CODEC_ID_ADPCM_IMA_DK4,\n    CODEC_ID_ADPCM_IMA_WS,\n    CODEC_ID_ADPCM_IMA_SMJPEG,\n    CODEC_ID_ADPCM_MS,\n    CODEC_ID_ADPCM_4XM,\n    CODEC_ID_ADPCM_XA,\n    CODEC_ID_ADPCM_ADX,\n    CODEC_ID_ADPCM_EA,\n    CODEC_ID_ADPCM_G726,\n    CODEC_ID_ADPCM_CT,\n    CODEC_ID_ADPCM_SWF,\n    CODEC_ID_ADPCM_YAMAHA,\n    CODEC_ID_ADPCM_SBPRO_4,\n    CODEC_ID_ADPCM_SBPRO_3,\n    CODEC_ID_ADPCM_SBPRO_2,\n    CODEC_ID_ADPCM_THP,\n    CODEC_ID_ADPCM_IMA_AMV,\n    CODEC_ID_ADPCM_EA_R1,\n    CODEC_ID_ADPCM_EA_R3,\n    CODEC_ID_ADPCM_EA_R2,\n    CODEC_ID_ADPCM_IMA_EA_SEAD,\n    
CODEC_ID_ADPCM_IMA_EA_EACS,\n    CODEC_ID_ADPCM_EA_XAS,\n    CODEC_ID_ADPCM_EA_MAXIS_XA,\n    CODEC_ID_ADPCM_IMA_ISS,\n    CODEC_ID_ADPCM_G722,\n    CODEC_ID_ADPCM_IMA_APC,\n    CODEC_ID_VIMA       = MKBETAG('V','I','M','A'),\n\n    /* AMR */\n    CODEC_ID_AMR_NB = 0x12000,\n    CODEC_ID_AMR_WB,\n\n    /* RealAudio codecs*/\n    CODEC_ID_RA_144 = 0x13000,\n    CODEC_ID_RA_288,\n\n    /* various DPCM codecs */\n    CODEC_ID_ROQ_DPCM = 0x14000,\n    CODEC_ID_INTERPLAY_DPCM,\n    CODEC_ID_XAN_DPCM,\n    CODEC_ID_SOL_DPCM,\n\n    /* audio codecs */\n    CODEC_ID_MP2 = 0x15000,\n    CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3\n    CODEC_ID_AAC,\n    CODEC_ID_AC3,\n    CODEC_ID_DTS,\n    CODEC_ID_VORBIS,\n    CODEC_ID_DVAUDIO,\n    CODEC_ID_WMAV1,\n    CODEC_ID_WMAV2,\n    CODEC_ID_MACE3,\n    CODEC_ID_MACE6,\n    CODEC_ID_VMDAUDIO,\n    CODEC_ID_FLAC,\n    CODEC_ID_MP3ADU,\n    CODEC_ID_MP3ON4,\n    CODEC_ID_SHORTEN,\n    CODEC_ID_ALAC,\n    CODEC_ID_WESTWOOD_SND1,\n    CODEC_ID_GSM, ///< as in Berlin toast format\n    CODEC_ID_QDM2,\n    CODEC_ID_COOK,\n    CODEC_ID_TRUESPEECH,\n    CODEC_ID_TTA,\n    CODEC_ID_SMACKAUDIO,\n    CODEC_ID_QCELP,\n    CODEC_ID_WAVPACK,\n    CODEC_ID_DSICINAUDIO,\n    CODEC_ID_IMC,\n    CODEC_ID_MUSEPACK7,\n    CODEC_ID_MLP,\n    CODEC_ID_GSM_MS, /* as found in WAV */\n    CODEC_ID_ATRAC3,\n    CODEC_ID_VOXWARE,\n    CODEC_ID_APE,\n    CODEC_ID_NELLYMOSER,\n    CODEC_ID_MUSEPACK8,\n    CODEC_ID_SPEEX,\n    CODEC_ID_WMAVOICE,\n    CODEC_ID_WMAPRO,\n    CODEC_ID_WMALOSSLESS,\n    CODEC_ID_ATRAC3P,\n    CODEC_ID_EAC3,\n    CODEC_ID_SIPR,\n    CODEC_ID_MP1,\n    CODEC_ID_TWINVQ,\n    CODEC_ID_TRUEHD,\n    CODEC_ID_MP4ALS,\n    CODEC_ID_ATRAC1,\n    CODEC_ID_BINKAUDIO_RDFT,\n    CODEC_ID_BINKAUDIO_DCT,\n    CODEC_ID_AAC_LATM,\n    CODEC_ID_QDMC,\n    CODEC_ID_CELT,\n    CODEC_ID_G723_1,\n    CODEC_ID_G729,\n    CODEC_ID_8SVX_EXP,\n    CODEC_ID_8SVX_FIB,\n    CODEC_ID_BMV_AUDIO,\n    CODEC_ID_RALF,\n    CODEC_ID_IAC,\n 
   CODEC_ID_ILBC,\n    CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'),\n    CODEC_ID_SONIC       = MKBETAG('S','O','N','C'),\n    CODEC_ID_SONIC_LS    = MKBETAG('S','O','N','L'),\n    CODEC_ID_PAF_AUDIO   = MKBETAG('P','A','F','A'),\n    CODEC_ID_OPUS        = MKBETAG('O','P','U','S'),\n\n    /* subtitle codecs */\n    CODEC_ID_FIRST_SUBTITLE = 0x17000,          ///< A dummy ID pointing at the start of subtitle codecs.\n    CODEC_ID_DVD_SUBTITLE = 0x17000,\n    CODEC_ID_DVB_SUBTITLE,\n    CODEC_ID_TEXT,  ///< raw UTF-8 text\n    CODEC_ID_XSUB,\n    CODEC_ID_SSA,\n    CODEC_ID_MOV_TEXT,\n    CODEC_ID_HDMV_PGS_SUBTITLE,\n    CODEC_ID_DVB_TELETEXT,\n    CODEC_ID_SRT,\n    CODEC_ID_MICRODVD   = MKBETAG('m','D','V','D'),\n    CODEC_ID_EIA_608    = MKBETAG('c','6','0','8'),\n    CODEC_ID_JACOSUB    = MKBETAG('J','S','U','B'),\n    CODEC_ID_SAMI       = MKBETAG('S','A','M','I'),\n    CODEC_ID_REALTEXT   = MKBETAG('R','T','X','T'),\n    CODEC_ID_SUBVIEWER  = MKBETAG('S','u','b','V'),\n\n    /* other specific kind of codecs (generally used for attachments) */\n    CODEC_ID_FIRST_UNKNOWN = 0x18000,           ///< A dummy ID pointing at the start of various fake codecs.\n    CODEC_ID_TTF = 0x18000,\n    CODEC_ID_BINTEXT    = MKBETAG('B','T','X','T'),\n    CODEC_ID_XBIN       = MKBETAG('X','B','I','N'),\n    CODEC_ID_IDF        = MKBETAG( 0 ,'I','D','F'),\n    CODEC_ID_OTF        = MKBETAG( 0 ,'O','T','F'),\n\n    CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like CODEC_ID_NONE) but lavf should attempt to identify it\n\n    CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS\n                                * stream (only used by libavformat) */\n    CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems\n                                * stream (only used by libavformat) */\n    CODEC_ID_FFMETADATA = 0x21000,   ///< Dummy codec for streams containing only metadata information.\n\n#endif /* AVCODEC_OLD_CODEC_IDS_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavcodec/qsv.h",
    "content": "/*\n * Intel MediaSDK QSV public API\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_QSV_H\n#define AVCODEC_QSV_H\n\n#include <mfx/mfxvideo.h>\n\ntypedef struct AVQSVContext {\n    mfxSession session;\n    int iopattern;\n\n    mfxExtBuffer **ext_buffers;\n    int         nb_ext_buffers;\n} AVQSVContext;\n\n/**\n * Allocate a new context.\n *\n * It must be freed by the caller with av_free().\n */\nAVQSVContext *av_qsv_alloc_context(void);\n\n#endif /* AVCODEC_QSV_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavcodec/vaapi.h",
    "content": "/*\n * Video Acceleration API (shared data between FFmpeg and the video player)\n * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1\n *\n * Copyright (C) 2008-2009 Splitted-Desktop Systems\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_VAAPI_H\n#define AVCODEC_VAAPI_H\n\n/**\n * @file\n * @ingroup lavc_codec_hwaccel_vaapi\n * Public libavcodec VA API header.\n */\n\n#include <stdint.h>\n\n/**\n * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding\n * @ingroup lavc_codec_hwaccel\n * @{\n */\n\n/**\n * This structure is used to share data between the FFmpeg library and\n * the client video application.\n * This shall be zero-allocated and available as\n * AVCodecContext.hwaccel_context. All user members can be set once\n * during initialization or through each AVCodecContext.get_buffer()\n * function call. 
In any case, they must be valid prior to calling\n * decoding functions.\n */\nstruct vaapi_context {\n    /**\n     * Window system dependent data\n     *\n     * - encoding: unused\n     * - decoding: Set by user\n     */\n    void *display;\n\n    /**\n     * Configuration ID\n     *\n     * - encoding: unused\n     * - decoding: Set by user\n     */\n    uint32_t config_id;\n\n    /**\n     * Context ID (video decode pipeline)\n     *\n     * - encoding: unused\n     * - decoding: Set by user\n     */\n    uint32_t context_id;\n\n    /**\n     * VAPictureParameterBuffer ID\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    uint32_t pic_param_buf_id;\n\n    /**\n     * VAIQMatrixBuffer ID\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    uint32_t iq_matrix_buf_id;\n\n    /**\n     * VABitPlaneBuffer ID (for VC-1 decoding)\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    uint32_t bitplane_buf_id;\n\n    /**\n     * Slice parameter/data buffer IDs\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    uint32_t *slice_buf_ids;\n\n    /**\n     * Number of effective slice buffer IDs to send to the HW\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    unsigned int n_slice_buf_ids;\n\n    /**\n     * Size of pre-allocated slice_buf_ids\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    unsigned int slice_buf_ids_alloc;\n\n    /**\n     * Pointer to VASliceParameterBuffers\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    void *slice_params;\n\n    /**\n     * Size of a VASliceParameterBuffer element\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    unsigned int slice_param_size;\n\n    /**\n     * Size of pre-allocated slice_params\n     *\n     * - encoding: unused\n     * - 
decoding: Set by libavcodec\n     */\n    unsigned int slice_params_alloc;\n\n    /**\n     * Number of slices currently filled in\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    unsigned int slice_count;\n\n    /**\n     * Pointer to slice data buffer base\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    const uint8_t *slice_data;\n\n    /**\n     * Current size of slice data\n     *\n     * - encoding: unused\n     * - decoding: Set by libavcodec\n     */\n    uint32_t slice_data_size;\n};\n\n/* @} */\n\n#endif /* AVCODEC_VAAPI_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavcodec/vda.h",
    "content": "/*\n * VDA HW acceleration\n *\n * copyright (c) 2011 Sebastien Zwickert\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_VDA_H\n#define AVCODEC_VDA_H\n\n/**\n * @file\n * @ingroup lavc_codec_hwaccel_vda\n * Public libavcodec VDA header.\n */\n\n#include \"libavcodec/avcodec.h\"\n\n#include <stdint.h>\n\n// emmintrin.h is unable to compile with -std=c99 -Werror=missing-prototypes\n// http://openradar.appspot.com/8026390\n#undef __GNUC_STDC_INLINE__\n\n#define Picture QuickdrawPicture\n#include <VideoDecodeAcceleration/VDADecoder.h>\n#undef Picture\n\n#include \"libavcodec/version.h\"\n\n// extra flags not defined in VDADecoder.h\nenum {\n    kVDADecodeInfo_Asynchronous = 1UL << 0,\n    kVDADecodeInfo_FrameDropped = 1UL << 1\n};\n\n/**\n * @defgroup lavc_codec_hwaccel_vda VDA\n * @ingroup lavc_codec_hwaccel\n *\n * @{\n */\n\n/**\n * This structure is used to provide the necessary configurations and data\n * to the VDA FFmpeg HWAccel implementation.\n *\n * The application must make it available as AVCodecContext.hwaccel_context.\n */\nstruct vda_context {\n    /**\n     * VDA decoder object.\n     *\n     * - encoding: unused\n     * - decoding: Set/Unset by libavcodec.\n     */\n    VDADecoder          
decoder;\n\n    /**\n     * The Core Video pixel buffer that contains the current image data.\n     *\n     * encoding: unused\n     * decoding: Set by libavcodec. Unset by user.\n     */\n    CVPixelBufferRef    cv_buffer;\n\n    /**\n     * Use the hardware decoder in synchronous mode.\n     *\n     * encoding: unused\n     * decoding: Set by user.\n     */\n    int                 use_sync_decoding;\n\n    /**\n     * The frame width.\n     *\n     * - encoding: unused\n     * - decoding: Set/Unset by user.\n     */\n    int                 width;\n\n    /**\n     * The frame height.\n     *\n     * - encoding: unused\n     * - decoding: Set/Unset by user.\n     */\n    int                 height;\n\n    /**\n     * The frame format.\n     *\n     * - encoding: unused\n     * - decoding: Set/Unset by user.\n     */\n    int                 format;\n\n    /**\n     * The pixel format for output image buffers.\n     *\n     * - encoding: unused\n     * - decoding: Set/Unset by user.\n     */\n    OSType              cv_pix_fmt_type;\n\n    /**\n     * unused\n     */\n    uint8_t             *priv_bitstream;\n\n    /**\n     * unused\n     */\n    int                 priv_bitstream_size;\n\n    /**\n     * unused\n     */\n    int                 priv_allocated_size;\n\n    /**\n     * Use av_buffer to manage buffer.\n     * When the flag is set, the CVPixelBuffers returned by the decoder will\n     * be released automatically, so you have to retain them if necessary.\n     * Not setting this flag may cause memory leak.\n     *\n     * encoding: unused\n     * decoding: Set by user.\n     */\n    int                 use_ref_buffer;\n};\n\n/** Create the video decoder. */\nint ff_vda_create_decoder(struct vda_context *vda_ctx,\n                          uint8_t *extradata,\n                          int extradata_size);\n\n/** Destroy the video decoder. 
*/\nint ff_vda_destroy_decoder(struct vda_context *vda_ctx);\n\n/**\n * This struct holds all the information that needs to be passed\n * between the caller and libavcodec for initializing VDA decoding.\n * Its size is not a part of the public ABI, it must be allocated with\n * av_vda_alloc_context() and freed with av_free().\n */\ntypedef struct AVVDAContext {\n    /**\n     * VDA decoder object. Created and freed by the caller.\n     */\n    VDADecoder decoder;\n\n    /**\n     * The output callback that must be passed to VDADecoderCreate.\n     * Set by av_vda_alloc_context().\n     */\n    VDADecoderOutputCallback output_callback;\n} AVVDAContext;\n\n/**\n * Allocate and initialize a VDA context.\n *\n * This function should be called from the get_format() callback when the caller\n * selects the AV_PIX_FMT_VDA format. The caller must then create the decoder\n * object (using the output callback provided by libavcodec) that will be used\n * for VDA-accelerated decoding.\n *\n * When decoding with VDA is finished, the caller must destroy the decoder\n * object and free the VDA context using av_free().\n *\n * @return the newly allocated context or NULL on failure\n */\nAVVDAContext *av_vda_alloc_context(void);\n\n/**\n * This is a convenience function that creates and sets up the VDA context using\n * an internal implementation.\n *\n * @param avctx the corresponding codec context\n *\n * @return >= 0 on success, a negative AVERROR code on failure\n */\nint av_vda_default_init(AVCodecContext *avctx);\n\n/**\n * This function must be called to free the VDA context initialized with\n * av_vda_default_init().\n *\n * @param avctx the corresponding codec context\n */\nvoid av_vda_default_free(AVCodecContext *avctx);\n\n/**\n * @}\n */\n\n#endif /* AVCODEC_VDA_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavcodec/vdpau.h",
    "content": "/*\n * The Video Decode and Presentation API for UNIX (VDPAU) is used for\n * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1.\n *\n * Copyright (C) 2008 NVIDIA\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_VDPAU_H\n#define AVCODEC_VDPAU_H\n\n/**\n * @file\n * @ingroup lavc_codec_hwaccel_vdpau\n * Public libavcodec VDPAU header.\n */\n\n\n/**\n * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer\n * @ingroup lavc_codec_hwaccel\n *\n * VDPAU hardware acceleration has two modules\n * - VDPAU decoding\n * - VDPAU presentation\n *\n * The VDPAU decoding module parses all headers using FFmpeg\n * parsing mechanisms and uses VDPAU for the actual decoding.\n *\n * As per the current implementation, the actual decoding\n * and rendering (API calls) are done as part of the VDPAU\n * presentation (vo_vdpau.c) module.\n *\n * @{\n */\n\n#include <vdpau/vdpau.h>\n#include <vdpau/vdpau_x11.h>\n#include \"libavutil/avconfig.h\"\n#include \"libavutil/attributes.h\"\n\n#include \"avcodec.h\"\n#include \"version.h\"\n\n#if FF_API_BUFS_VDPAU\nunion AVVDPAUPictureInfo {\n    VdpPictureInfoH264        h264;\n    VdpPictureInfoMPEG1Or2    mpeg;\n    VdpPictureInfoVC1          vc1;\n    VdpPictureInfoMPEG4Part2 
mpeg4;\n};\n#endif\n\nstruct AVCodecContext;\nstruct AVFrame;\n\ntypedef int (*AVVDPAU_Render2)(struct AVCodecContext *, struct AVFrame *,\n                               const VdpPictureInfo *, uint32_t,\n                               const VdpBitstreamBuffer *);\n\n/**\n * This structure is used to share data between the libavcodec library and\n * the client video application.\n * The user shall allocate the structure via the av_alloc_vdpau_hwaccel\n * function and make it available as\n * AVCodecContext.hwaccel_context. Members can be set by the user once\n * during initialization or through each AVCodecContext.get_buffer()\n * function call. In any case, they must be valid prior to calling\n * decoding functions.\n *\n * The size of this structure is not a part of the public ABI and must not\n * be used outside of libavcodec. Use av_vdpau_alloc_context() to allocate an\n * AVVDPAUContext.\n */\ntypedef struct AVVDPAUContext {\n    /**\n     * VDPAU decoder handle\n     *\n     * Set by user.\n     */\n    VdpDecoder decoder;\n\n    /**\n     * VDPAU decoder render callback\n     *\n     * Set by the user.\n     */\n    VdpDecoderRender *render;\n\n#if FF_API_BUFS_VDPAU\n    /**\n     * VDPAU picture information\n     *\n     * Set by libavcodec.\n     */\n    attribute_deprecated\n    union AVVDPAUPictureInfo info;\n\n    /**\n     * Allocated size of the bitstream_buffers table.\n     *\n     * Set by libavcodec.\n     */\n    attribute_deprecated\n    int bitstream_buffers_allocated;\n\n    /**\n     * Useful bitstream buffers in the bitstream buffers table.\n     *\n     * Set by libavcodec.\n     */\n    attribute_deprecated\n    int bitstream_buffers_used;\n\n   /**\n     * Table of bitstream buffers.\n     * The user is responsible for freeing this buffer using av_freep().\n     *\n     * Set by libavcodec.\n     */\n    attribute_deprecated\n    VdpBitstreamBuffer *bitstream_buffers;\n#endif\n    AVVDPAU_Render2 render2;\n} AVVDPAUContext;\n\n/**\n * 
@brief allocation function for AVVDPAUContext\n *\n * Allows extending the struct without breaking API/ABI\n */\nAVVDPAUContext *av_alloc_vdpaucontext(void);\n\nAVVDPAU_Render2 av_vdpau_hwaccel_get_render2(const AVVDPAUContext *);\nvoid av_vdpau_hwaccel_set_render2(AVVDPAUContext *, AVVDPAU_Render2);\n\n/**\n * Associate a VDPAU device with a codec context for hardware acceleration.\n * This function is meant to be called from the get_format() codec callback,\n * or earlier. It can also be called after avcodec_flush_buffers() to change\n * the underlying VDPAU device mid-stream (e.g. to recover from non-transparent\n * display preemption).\n *\n * @note get_format() must return AV_PIX_FMT_VDPAU if this function completes\n * successfully.\n *\n * @param avctx decoding context whose get_format() callback is invoked\n * @param device VDPAU device handle to use for hardware acceleration\n * @param get_proc_address VDPAU device driver\n * @param flags zero or more OR'd AV_HWACCEL_FLAG_* flags\n *\n * @return 0 on success, an AVERROR code on failure.\n */\nint av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device,\n                          VdpGetProcAddress *get_proc_address, unsigned flags);\n\n/**\n * Gets the parameters to create an adequate VDPAU video surface for the codec\n * context using VDPAU hardware decoding acceleration.\n *\n * @note Behavior is undefined if the context was not successfully bound to a\n * VDPAU device using av_vdpau_bind_context().\n *\n * @param avctx the codec context being used for decoding the stream\n * @param type storage space for the VDPAU video surface chroma type\n *              (or NULL to ignore)\n * @param width storage space for the VDPAU video surface pixel width\n *              (or NULL to ignore)\n * @param height storage space for the VDPAU video surface pixel height\n *              (or NULL to ignore)\n *\n * @return 0 on success, a negative AVERROR code on failure.\n */\nint 
av_vdpau_get_surface_parameters(AVCodecContext *avctx, VdpChromaType *type,\n                                    uint32_t *width, uint32_t *height);\n\n/**\n * Allocate an AVVDPAUContext.\n *\n * @return Newly-allocated AVVDPAUContext or NULL on failure.\n */\nAVVDPAUContext *av_vdpau_alloc_context(void);\n\n/**\n * Get a decoder profile that should be used for initializing a VDPAU decoder.\n * Should be called from the AVCodecContext.get_format() callback.\n *\n * @param avctx the codec context being used for decoding the stream\n * @param profile a pointer into which the result will be written on success.\n *                The contents of profile are undefined if this function returns\n *                an error.\n *\n * @return 0 on success (non-negative), a negative AVERROR on failure.\n */\nint av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile);\n\n#if FF_API_CAP_VDPAU\n/** @brief The videoSurface is used for rendering. */\n#define FF_VDPAU_STATE_USED_FOR_RENDER 1\n\n/**\n * @brief The videoSurface is needed for reference/prediction.\n * The codec manipulates this.\n */\n#define FF_VDPAU_STATE_USED_FOR_REFERENCE 2\n\n/**\n * @brief This structure is used as a callback between the FFmpeg\n * decoder (vd_) and presentation (vo_) module.\n * This is used for defining a video frame containing surface,\n * picture parameter, bitstream information etc which are passed\n * between the FFmpeg decoder and its clients.\n */\nstruct vdpau_render_state {\n    VdpVideoSurface surface; ///< Used as rendered surface, never changed.\n\n    int state; ///< Holds FF_VDPAU_STATE_* values.\n\n#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI\n    /** picture parameter information for all supported codecs */\n    union AVVDPAUPictureInfo info;\n#endif\n\n    /** Describe size/location of the compressed video data.\n        Set to 0 when freeing bitstream_buffers. 
*/\n    int bitstream_buffers_allocated;\n    int bitstream_buffers_used;\n    /** The user is responsible for freeing this buffer using av_freep(). */\n    VdpBitstreamBuffer *bitstream_buffers;\n\n#if !AV_HAVE_INCOMPATIBLE_LIBAV_ABI\n    /** picture parameter information for all supported codecs */\n    union AVVDPAUPictureInfo info;\n#endif\n};\n#endif\n\n/* @}*/\n\n#endif /* AVCODEC_VDPAU_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavcodec/version.h",
    "content": "/*\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_VERSION_H\n#define AVCODEC_VERSION_H\n\n/**\n * @file\n * @ingroup libavc\n * Libavcodec version macros.\n */\n\n#include \"libavutil/version.h\"\n\n#define LIBAVCODEC_VERSION_MAJOR 56\n#define LIBAVCODEC_VERSION_MINOR  26\n#define LIBAVCODEC_VERSION_MICRO 100\n\n#define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \\\n                                               LIBAVCODEC_VERSION_MINOR, \\\n                                               LIBAVCODEC_VERSION_MICRO)\n#define LIBAVCODEC_VERSION      AV_VERSION(LIBAVCODEC_VERSION_MAJOR,    \\\n                                           LIBAVCODEC_VERSION_MINOR,    \\\n                                           LIBAVCODEC_VERSION_MICRO)\n#define LIBAVCODEC_BUILD        LIBAVCODEC_VERSION_INT\n\n#define LIBAVCODEC_IDENT        \"Lavc\" AV_STRINGIFY(LIBAVCODEC_VERSION)\n\n/**\n * FF_API_* defines may be placed below to indicate public API that will be\n * dropped at a future version bump. 
The defines themselves are not part of\n * the public API and may change, break or disappear at any time.\n */\n\n#ifndef FF_API_VIMA_DECODER\n#define FF_API_VIMA_DECODER     (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_REQUEST_CHANNELS\n#define FF_API_REQUEST_CHANNELS (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_OLD_DECODE_AUDIO\n#define FF_API_OLD_DECODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_OLD_ENCODE_AUDIO\n#define FF_API_OLD_ENCODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_OLD_ENCODE_VIDEO\n#define FF_API_OLD_ENCODE_VIDEO (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_CODEC_ID\n#define FF_API_CODEC_ID          (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_AUDIO_CONVERT\n#define FF_API_AUDIO_CONVERT     (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_AVCODEC_RESAMPLE\n#define FF_API_AVCODEC_RESAMPLE  FF_API_AUDIO_CONVERT\n#endif\n#ifndef FF_API_DEINTERLACE\n#define FF_API_DEINTERLACE       (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_DESTRUCT_PACKET\n#define FF_API_DESTRUCT_PACKET   (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_GET_BUFFER\n#define FF_API_GET_BUFFER        (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_MISSING_SAMPLE\n#define FF_API_MISSING_SAMPLE    (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_LOWRES\n#define FF_API_LOWRES            (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_CAP_VDPAU\n#define FF_API_CAP_VDPAU         (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_BUFS_VDPAU\n#define FF_API_BUFS_VDPAU        (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_VOXWARE\n#define FF_API_VOXWARE           (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_SET_DIMENSIONS\n#define FF_API_SET_DIMENSIONS    (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_DEBUG_MV\n#define FF_API_DEBUG_MV          (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_AC_VLC\n#define FF_API_AC_VLC   
         (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_OLD_MSMPEG4\n#define FF_API_OLD_MSMPEG4       (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_ASPECT_EXTENDED\n#define FF_API_ASPECT_EXTENDED   (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_THREAD_OPAQUE\n#define FF_API_THREAD_OPAQUE     (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_CODEC_PKT\n#define FF_API_CODEC_PKT         (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_ARCH_ALPHA\n#define FF_API_ARCH_ALPHA        (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_XVMC\n#define FF_API_XVMC              (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_ERROR_RATE\n#define FF_API_ERROR_RATE        (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_QSCALE_TYPE\n#define FF_API_QSCALE_TYPE       (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_MB_TYPE\n#define FF_API_MB_TYPE           (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_MAX_BFRAMES\n#define FF_API_MAX_BFRAMES       (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_NEG_LINESIZES\n#define FF_API_NEG_LINESIZES     (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_EMU_EDGE\n#define FF_API_EMU_EDGE          (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_ARCH_SH4\n#define FF_API_ARCH_SH4          (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_ARCH_SPARC\n#define FF_API_ARCH_SPARC        (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_UNUSED_MEMBERS\n#define FF_API_UNUSED_MEMBERS    (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_IDCT_XVIDMMX\n#define FF_API_IDCT_XVIDMMX      (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_INPUT_PRESERVED\n#define FF_API_INPUT_PRESERVED   (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_NORMALIZE_AQP\n#define FF_API_NORMALIZE_AQP     (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_GMC\n#define FF_API_GMC               (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef 
FF_API_MV0\n#define FF_API_MV0               (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_CODEC_NAME\n#define FF_API_CODEC_NAME        (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_AFD\n#define FF_API_AFD               (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_VISMV\n/* XXX: don't forget to drop the -vismv documentation */\n#define FF_API_VISMV             (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_DV_FRAME_PROFILE\n#define FF_API_DV_FRAME_PROFILE  (LIBAVCODEC_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_AUDIOENC_DELAY\n#define FF_API_AUDIOENC_DELAY    (LIBAVCODEC_VERSION_MAJOR < 58)\n#endif\n#ifndef FF_API_AVCTX_TIMEBASE\n#define FF_API_AVCTX_TIMEBASE    (LIBAVCODEC_VERSION_MAJOR < 59)\n#endif\n#ifndef FF_API_MPV_OPT\n#define FF_API_MPV_OPT           (LIBAVCODEC_VERSION_MAJOR < 59)\n#endif\n#ifndef FF_API_STREAM_CODEC_TAG\n#define FF_API_STREAM_CODEC_TAG  (LIBAVCODEC_VERSION_MAJOR < 59)\n#endif\n\n#endif /* AVCODEC_VERSION_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavcodec/vorbis_parser.h",
    "content": "/*\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * A public API for Vorbis parsing\n *\n * Determines the duration for each packet.\n */\n\n#ifndef AVCODEC_VORBIS_PARSE_H\n#define AVCODEC_VORBIS_PARSE_H\n\n#include <stdint.h>\n\ntypedef struct AVVorbisParseContext AVVorbisParseContext;\n\n/**\n * Allocate and initialize the Vorbis parser using headers in the extradata.\n *\n * @param avctx codec context\n * @param s     Vorbis parser context\n */\nAVVorbisParseContext *av_vorbis_parse_init(const uint8_t *extradata,\n                                           int extradata_size);\n\n/**\n * Free the parser and everything associated with it.\n */\nvoid av_vorbis_parse_free(AVVorbisParseContext **s);\n\n#define VORBIS_FLAG_HEADER  0x00000001\n#define VORBIS_FLAG_COMMENT 0x00000002\n#define VORBIS_FLAG_SETUP   0x00000004\n\n/**\n * Get the duration for a Vorbis packet.\n *\n * If @p flags is @c NULL,\n * special frames are considered invalid.\n *\n * @param s        Vorbis parser context\n * @param buf      buffer containing a Vorbis frame\n * @param buf_size size of the buffer\n * @param flags    flags for special frames\n */\nint av_vorbis_parse_frame_flags(AVVorbisParseContext *s, const uint8_t *buf,\n           
                     int buf_size, int *flags);\n\n/**\n * Get the duration for a Vorbis packet.\n *\n * @param s        Vorbis parser context\n * @param buf      buffer containing a Vorbis frame\n * @param buf_size size of the buffer\n */\nint av_vorbis_parse_frame(AVVorbisParseContext *s, const uint8_t *buf,\n                          int buf_size);\n\nvoid av_vorbis_parse_reset(AVVorbisParseContext *s);\n\n#endif /* AVCODEC_VORBIS_PARSE_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavcodec/xvmc.h",
    "content": "/*\n * Copyright (C) 2003 Ivan Kalvachev\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVCODEC_XVMC_H\n#define AVCODEC_XVMC_H\n\n/**\n * @file\n * @ingroup lavc_codec_hwaccel_xvmc\n * Public libavcodec XvMC header.\n */\n\n#include <X11/extensions/XvMC.h>\n\n#include \"libavutil/attributes.h\"\n#include \"version.h\"\n#include \"avcodec.h\"\n\n/**\n * @defgroup lavc_codec_hwaccel_xvmc XvMC\n * @ingroup lavc_codec_hwaccel\n *\n * @{\n */\n\n#define AV_XVMC_ID                    0x1DC711C0  /**< special value to ensure that regular pixel routines haven't corrupted the struct\n                                                       the number is 1337 speak for the letters IDCT MCo (motion compensation) */\n\nattribute_deprecated struct xvmc_pix_fmt {\n    /** The field contains the special constant value AV_XVMC_ID.\n        It is used as a test that the application correctly uses the API,\n        and that there is no corruption caused by pixel routines.\n        - application - set during initialization\n        - libavcodec  - unchanged\n    */\n    int             xvmc_id;\n\n    /** Pointer to the block array allocated by XvMCCreateBlocks().\n        The array has to be freed by XvMCDestroyBlocks().\n        Each 
group of 64 values represents one data block of differential\n        pixel information (in MoCo mode) or coefficients for IDCT.\n        - application - set the pointer during initialization\n        - libavcodec  - fills coefficients/pixel data into the array\n    */\n    short*          data_blocks;\n\n    /** Pointer to the macroblock description array allocated by\n        XvMCCreateMacroBlocks() and freed by XvMCDestroyMacroBlocks().\n        - application - set the pointer during initialization\n        - libavcodec  - fills description data into the array\n    */\n    XvMCMacroBlock* mv_blocks;\n\n    /** Number of macroblock descriptions that can be stored in the mv_blocks\n        array.\n        - application - set during initialization\n        - libavcodec  - unchanged\n    */\n    int             allocated_mv_blocks;\n\n    /** Number of blocks that can be stored at once in the data_blocks array.\n        - application - set during initialization\n        - libavcodec  - unchanged\n    */\n    int             allocated_data_blocks;\n\n    /** Indicate that the hardware would interpret data_blocks as IDCT\n        coefficients and perform IDCT on them.\n        - application - set during initialization\n        - libavcodec  - unchanged\n    */\n    int             idct;\n\n    /** In MoCo mode it indicates that intra macroblocks are assumed to be in\n        unsigned format; same as the XVMC_INTRA_UNSIGNED flag.\n        - application - set during initialization\n        - libavcodec  - unchanged\n    */\n    int             unsigned_intra;\n\n    /** Pointer to the surface allocated by XvMCCreateSurface().\n        It has to be freed by XvMCDestroySurface() on application exit.\n        It identifies the frame and its state on the video hardware.\n        - application - set during initialization\n        - libavcodec  - unchanged\n    */\n    XvMCSurface*    p_surface;\n\n/** Set by the decoder before calling ff_draw_horiz_band(),\n    needed by the 
XvMCRenderSurface function. */\n//@{\n    /** Pointer to the surface used as past reference\n        - application - unchanged\n        - libavcodec  - set\n    */\n    XvMCSurface*    p_past_surface;\n\n    /** Pointer to the surface used as future reference\n        - application - unchanged\n        - libavcodec  - set\n    */\n    XvMCSurface*    p_future_surface;\n\n    /** top/bottom field or frame\n        - application - unchanged\n        - libavcodec  - set\n    */\n    unsigned int    picture_structure;\n\n    /** XVMC_SECOND_FIELD - 1st or 2nd field in the sequence\n        - application - unchanged\n        - libavcodec  - set\n    */\n    unsigned int    flags;\n//@}\n\n    /** Number of macroblock descriptions in the mv_blocks array\n        that have already been passed to the hardware.\n        - application - zeroes it on get_buffer().\n                        A successful ff_draw_horiz_band() may increment it\n                        with filled_mv_blocks_num or zero both.\n        - libavcodec  - unchanged\n    */\n    int             start_mv_blocks_num;\n\n    /** Number of new macroblock descriptions in the mv_blocks array (after\n        start_mv_blocks_num) that are filled by libavcodec and have to be\n        passed to the hardware.\n        - application - zeroes it on get_buffer() or after successful\n                        ff_draw_horiz_band().\n        - libavcodec  - increment with one of each stored MB\n    */\n    int             filled_mv_blocks_num;\n\n    /** Number of the next free data block; one data block consists of\n        64 short values in the data_blocks array.\n        All blocks before this one have already been claimed by placing their\n        position into the corresponding block description structure field,\n        that are part of the mv_blocks array.\n        - application - zeroes it on get_buffer().\n                        A successful ff_draw_horiz_band() may zero it together\n                        with 
start_mv_blocks_num.\n        - libavcodec  - each decoded macroblock increases it by the number\n                        of coded blocks it contains.\n    */\n    int             next_free_data_block_num;\n};\n\n/**\n * @}\n */\n\n#endif /* AVCODEC_XVMC_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavdevice/avdevice.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVDEVICE_AVDEVICE_H\n#define AVDEVICE_AVDEVICE_H\n\n#include \"version.h\"\n\n/**\n * @file\n * @ingroup lavd\n * Main libavdevice API header\n */\n\n/**\n * @defgroup lavd Special devices muxing/demuxing library\n * @{\n * Libavdevice is a complementary library to @ref libavf \"libavformat\". It\n * provides various \"special\" platform-specific muxers and demuxers, e.g. for\n * grabbing devices, audio capture and playback etc. As a consequence, the\n * (de)muxers in libavdevice are of the AVFMT_NOFILE type (they use their own\n * I/O functions). The filename passed to avformat_open_input() often does not\n * refer to an actually existing file, but has some special device-specific\n * meaning - e.g. for x11grab it is the display name.\n *\n * To use libavdevice, simply call avdevice_register_all() to register all\n * compiled muxers and demuxers. 
They all use standard libavformat API.\n * @}\n */\n\n#include \"libavutil/log.h\"\n#include \"libavutil/opt.h\"\n#include \"libavutil/dict.h\"\n#include \"libavformat/avformat.h\"\n\n/**\n * Return the LIBAVDEVICE_VERSION_INT constant.\n */\nunsigned avdevice_version(void);\n\n/**\n * Return the libavdevice build-time configuration.\n */\nconst char *avdevice_configuration(void);\n\n/**\n * Return the libavdevice license.\n */\nconst char *avdevice_license(void);\n\n/**\n * Initialize libavdevice and register all the input and output devices.\n * @warning This function is not thread safe.\n */\nvoid avdevice_register_all(void);\n\n/**\n * Audio input devices iterator.\n *\n * If d is NULL, returns the first registered input audio/video device,\n * if d is non-NULL, returns the next registered input audio/video device after d\n * or NULL if d is the last one.\n */\nAVInputFormat *av_input_audio_device_next(AVInputFormat  *d);\n\n/**\n * Video input devices iterator.\n *\n * If d is NULL, returns the first registered input audio/video device,\n * if d is non-NULL, returns the next registered input audio/video device after d\n * or NULL if d is the last one.\n */\nAVInputFormat *av_input_video_device_next(AVInputFormat  *d);\n\n/**\n * Audio output devices iterator.\n *\n * If d is NULL, returns the first registered output audio/video device,\n * if d is non-NULL, returns the next registered output audio/video device after d\n * or NULL if d is the last one.\n */\nAVOutputFormat *av_output_audio_device_next(AVOutputFormat *d);\n\n/**\n * Video output devices iterator.\n *\n * If d is NULL, returns the first registered output audio/video device,\n * if d is non-NULL, returns the next registered output audio/video device after d\n * or NULL if d is the last one.\n */\nAVOutputFormat *av_output_video_device_next(AVOutputFormat *d);\n\ntypedef struct AVDeviceRect {\n    int x;      /**< x coordinate of top left corner */\n    int y;      /**< y coordinate of top left 
corner */\n    int width;  /**< width */\n    int height; /**< height */\n} AVDeviceRect;\n\n/**\n * Message types used by avdevice_app_to_dev_control_message().\n */\nenum AVAppToDevMessageType {\n    /**\n     * Dummy message.\n     */\n    AV_APP_TO_DEV_NONE = MKBETAG('N','O','N','E'),\n\n    /**\n     * Window size change message.\n     *\n     * Message is sent to the device every time the application changes the size\n     * of the window device renders to.\n     * Message should also be sent right after window is created.\n     *\n     * data: AVDeviceRect: new window size.\n     */\n    AV_APP_TO_DEV_WINDOW_SIZE = MKBETAG('G','E','O','M'),\n\n    /**\n     * Repaint request message.\n     *\n     * Message is sent to the device when window has to be repainted.\n     *\n     * data: AVDeviceRect: area required to be repainted.\n     *       NULL: whole area is required to be repainted.\n     */\n    AV_APP_TO_DEV_WINDOW_REPAINT = MKBETAG('R','E','P','A'),\n\n    /**\n     * Request pause/play.\n     *\n     * Application requests pause/unpause playback.\n     * Mostly usable with devices that have internal buffer.\n     * By default devices are not paused.\n     *\n     * data: NULL\n     */\n    AV_APP_TO_DEV_PAUSE        = MKBETAG('P', 'A', 'U', ' '),\n    AV_APP_TO_DEV_PLAY         = MKBETAG('P', 'L', 'A', 'Y'),\n    AV_APP_TO_DEV_TOGGLE_PAUSE = MKBETAG('P', 'A', 'U', 'T'),\n\n    /**\n     * Volume control message.\n     *\n     * Set volume level. It may be device-dependent if volume\n     * is changed per stream or system wide. Per stream volume\n     * change is expected when possible.\n     *\n     * data: double: new volume with range of 0.0 - 1.0.\n     */\n    AV_APP_TO_DEV_SET_VOLUME = MKBETAG('S', 'V', 'O', 'L'),\n\n    /**\n     * Mute control messages.\n     *\n     * Change mute state. It may be device-dependent if mute status\n     * is changed per stream or system wide. 
Per stream mute status\n     * change is expected when possible.\n     *\n     * data: NULL.\n     */\n    AV_APP_TO_DEV_MUTE        = MKBETAG(' ', 'M', 'U', 'T'),\n    AV_APP_TO_DEV_UNMUTE      = MKBETAG('U', 'M', 'U', 'T'),\n    AV_APP_TO_DEV_TOGGLE_MUTE = MKBETAG('T', 'M', 'U', 'T'),\n\n    /**\n     * Get volume/mute messages.\n     *\n     * Force the device to send AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED or\n     * AV_DEV_TO_APP_MUTE_STATE_CHANGED command respectively.\n     *\n     * data: NULL.\n     */\n    AV_APP_TO_DEV_GET_VOLUME = MKBETAG('G', 'V', 'O', 'L'),\n    AV_APP_TO_DEV_GET_MUTE   = MKBETAG('G', 'M', 'U', 'T'),\n};\n\n/**\n * Message types used by avdevice_dev_to_app_control_message().\n */\nenum AVDevToAppMessageType {\n    /**\n     * Dummy message.\n     */\n    AV_DEV_TO_APP_NONE = MKBETAG('N','O','N','E'),\n\n    /**\n     * Create window buffer message.\n     *\n     * Device requests to create a window buffer. Exact meaning is device-\n     * and application-dependent. 
Message is sent before rendering first\n     * frame and all one-shot initializations should be done here.\n     * Application is allowed to ignore preferred window buffer size.\n     *\n     * @note: Application is obligated to inform about window buffer size\n     *        with AV_APP_TO_DEV_WINDOW_SIZE message.\n     *\n     * data: AVDeviceRect: preferred size of the window buffer.\n     *       NULL: no preferred size of the window buffer.\n     */\n    AV_DEV_TO_APP_CREATE_WINDOW_BUFFER = MKBETAG('B','C','R','E'),\n\n    /**\n     * Prepare window buffer message.\n     *\n     * Device requests to prepare a window buffer for rendering.\n     * Exact meaning is device- and application-dependent.\n     * Message is sent before rendering of each frame.\n     *\n     * data: NULL.\n     */\n    AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER = MKBETAG('B','P','R','E'),\n\n    /**\n     * Display window buffer message.\n     *\n     * Device requests to display a window buffer.\n     * Message is sent when new frame is ready to be displayed.\n     * Usually buffers need to be swapped in handler of this message.\n     *\n     * data: NULL.\n     */\n    AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER = MKBETAG('B','D','I','S'),\n\n    /**\n     * Destroy window buffer message.\n     *\n     * Device requests to destroy a window buffer.\n     * Message is sent when device is about to be destroyed and window\n     * buffer is not required anymore.\n     *\n     * data: NULL.\n     */\n    AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER = MKBETAG('B','D','E','S'),\n\n    /**\n     * Buffer fullness status messages.\n     *\n     * Device signals buffer overflow/underflow.\n     *\n     * data: NULL.\n     */\n    AV_DEV_TO_APP_BUFFER_OVERFLOW = MKBETAG('B','O','F','L'),\n    AV_DEV_TO_APP_BUFFER_UNDERFLOW = MKBETAG('B','U','F','L'),\n\n    /**\n     * Buffer readable/writable.\n     *\n     * Device informs that buffer is readable/writable.\n     * When possible, device informs how many bytes can be 
read/write.\n     *\n     * @warning Device may not inform when number of bytes than can be read/write changes.\n     *\n     * data: int64_t: amount of bytes available to read/write.\n     *       NULL: amount of bytes available to read/write is not known.\n     */\n    AV_DEV_TO_APP_BUFFER_READABLE = MKBETAG('B','R','D',' '),\n    AV_DEV_TO_APP_BUFFER_WRITABLE = MKBETAG('B','W','R',' '),\n\n    /**\n     * Mute state change message.\n     *\n     * Device informs that mute state has changed.\n     *\n     * data: int: 0 for not muted state, non-zero for muted state.\n     */\n    AV_DEV_TO_APP_MUTE_STATE_CHANGED = MKBETAG('C','M','U','T'),\n\n    /**\n     * Volume level change message.\n     *\n     * Device informs that volume level has changed.\n     *\n     * data: double: new volume with range of 0.0 - 1.0.\n     */\n    AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED = MKBETAG('C','V','O','L'),\n};\n\n/**\n * Send control message from application to device.\n *\n * @param s         device context.\n * @param type      message type.\n * @param data      message data. Exact type depends on message type.\n * @param data_size size of message data.\n * @return >= 0 on success, negative on error.\n *         AVERROR(ENOSYS) when device doesn't implement handler of the message.\n */\nint avdevice_app_to_dev_control_message(struct AVFormatContext *s,\n                                        enum AVAppToDevMessageType type,\n                                        void *data, size_t data_size);\n\n/**\n * Send control message from device to application.\n *\n * @param s         device context.\n * @param type      message type.\n * @param data      message data. 
Can be NULL.\n * @param data_size size of message data.\n * @return >= 0 on success, negative on error.\n *         AVERROR(ENOSYS) when application doesn't implement handler of the message.\n */\nint avdevice_dev_to_app_control_message(struct AVFormatContext *s,\n                                        enum AVDevToAppMessageType type,\n                                        void *data, size_t data_size);\n\n/**\n * Following API allows user to probe device capabilities (supported codecs,\n * pixel formats, sample formats, resolutions, channel counts, etc).\n * It is build on top op AVOption API.\n * Queried capabilities allows to set up converters of video or audio\n * parameters that fit to the device.\n *\n * List of capabilities that can be queried:\n *  - Capabilities valid for both audio and video devices:\n *    - codec:          supported audio/video codecs.\n *                      type: AV_OPT_TYPE_INT (AVCodecID value)\n *  - Capabilities valid for audio devices:\n *    - sample_format:  supported sample formats.\n *                      type: AV_OPT_TYPE_INT (AVSampleFormat value)\n *    - sample_rate:    supported sample rates.\n *                      type: AV_OPT_TYPE_INT\n *    - channels:       supported number of channels.\n *                      type: AV_OPT_TYPE_INT\n *    - channel_layout: supported channel layouts.\n *                      type: AV_OPT_TYPE_INT64\n *  - Capabilities valid for video devices:\n *    - pixel_format:   supported pixel formats.\n *                      type: AV_OPT_TYPE_INT (AVPixelFormat value)\n *    - window_size:    supported window sizes (describes size of the window size presented to the user).\n *                      type: AV_OPT_TYPE_IMAGE_SIZE\n *    - frame_size:     supported frame sizes (describes size of provided video frames).\n *                      type: AV_OPT_TYPE_IMAGE_SIZE\n *    - fps:            supported fps values\n *                      type: AV_OPT_TYPE_RATIONAL\n *\n * Value of the 
capability may be set by user using av_opt_set() function\n * and AVDeviceCapabilitiesQuery object. Following queries will\n * limit results to the values matching already set capabilities.\n * For example, setting a codec may impact number of formats or fps values\n * returned during next query. Setting invalid value may limit results to zero.\n *\n * Example of the usage basing on opengl output device:\n *\n * @code\n *  AVFormatContext *oc = NULL;\n *  AVDeviceCapabilitiesQuery *caps = NULL;\n *  AVOptionRanges *ranges;\n *  int ret;\n *\n *  if ((ret = avformat_alloc_output_context2(&oc, NULL, \"opengl\", NULL)) < 0)\n *      goto fail;\n *  if (avdevice_capabilities_create(&caps, oc, NULL) < 0)\n *      goto fail;\n *\n *  //query codecs\n *  if (av_opt_query_ranges(&ranges, caps, \"codec\", AV_OPT_MULTI_COMPONENT_RANGE)) < 0)\n *      goto fail;\n *  //pick codec here and set it\n *  av_opt_set(caps, \"codec\", AV_CODEC_ID_RAWVIDEO, 0);\n *\n *  //query format\n *  if (av_opt_query_ranges(&ranges, caps, \"pixel_format\", AV_OPT_MULTI_COMPONENT_RANGE)) < 0)\n *      goto fail;\n *  //pick format here and set it\n *  av_opt_set(caps, \"pixel_format\", AV_PIX_FMT_YUV420P, 0);\n *\n *  //query and set more capabilities\n *\n * fail:\n *  //clean up code\n *  avdevice_capabilities_free(&query, oc);\n *  avformat_free_context(oc);\n * @endcode\n */\n\n/**\n * Structure describes device capabilities.\n *\n * It is used by devices in conjunction with av_device_capabilities AVOption table\n * to implement capabilities probing API based on AVOption API. 
Should not be used directly.\n */\ntypedef struct AVDeviceCapabilitiesQuery {\n    const AVClass *av_class;\n    AVFormatContext *device_context;\n    enum AVCodecID codec;\n    enum AVSampleFormat sample_format;\n    enum AVPixelFormat pixel_format;\n    int sample_rate;\n    int channels;\n    int64_t channel_layout;\n    int window_width;\n    int window_height;\n    int frame_width;\n    int frame_height;\n    AVRational fps;\n} AVDeviceCapabilitiesQuery;\n\n/**\n * AVOption table used by devices to implement device capabilities API. Should not be used by a user.\n */\nextern const AVOption av_device_capabilities[];\n\n/**\n * Initialize capabilities probing API based on AVOption API.\n *\n * avdevice_capabilities_free() must be called when query capabilities API is\n * not used anymore.\n *\n * @param[out] caps      Device capabilities data. Pointer to a NULL pointer must be passed.\n * @param s              Context of the device.\n * @param device_options An AVDictionary filled with device-private options.\n *                       On return this parameter will be destroyed and replaced with a dict\n *                       containing options that were not found. 
May be NULL.\n *                       The same options must be passed later to avformat_write_header() for output\n *                       devices or avformat_open_input() for input devices, or at any other place\n *                       that affects device-private options.\n *\n * @return >= 0 on success, negative otherwise.\n */\nint avdevice_capabilities_create(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s,\n                                 AVDictionary **device_options);\n\n/**\n * Free resources created by avdevice_capabilities_create()\n *\n * @param caps Device capabilities data to be freed.\n * @param s    Context of the device.\n */\nvoid avdevice_capabilities_free(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s);\n\n/**\n * Structure describes basic parameters of the device.\n */\ntypedef struct AVDeviceInfo {\n    char *device_name;                   /**< device name, format depends on device */\n    char *device_description;            /**< human friendly name */\n} AVDeviceInfo;\n\n/**\n * List of devices.\n */\ntypedef struct AVDeviceInfoList {\n    AVDeviceInfo **devices;              /**< list of autodetected devices */\n    int nb_devices;                      /**< number of autodetected devices */\n    int default_device;                  /**< index of default device or -1 if no default */\n} AVDeviceInfoList;\n\n/**\n * List devices.\n *\n * Returns available device names and their parameters.\n *\n * @note: Some devices may accept system-dependent device names that cannot be\n *        autodetected. 
The list returned by this function cannot be assumed to\n *        be always completed.\n *\n * @param s                device context.\n * @param[out] device_list list of autodetected devices.\n * @return count of autodetected devices, negative on error.\n */\nint avdevice_list_devices(struct AVFormatContext *s, AVDeviceInfoList **device_list);\n\n/**\n * Convenient function to free result of avdevice_list_devices().\n *\n * @param devices device list to be freed.\n */\nvoid avdevice_free_list_devices(AVDeviceInfoList **device_list);\n\n/**\n * List devices.\n *\n * Returns available device names and their parameters.\n * These are convinient wrappers for avdevice_list_devices().\n * Device context is allocated and deallocated internally.\n *\n * @param device           device format. May be NULL if device name is set.\n * @param device_name      device name. May be NULL if device format is set.\n * @param device_options   An AVDictionary filled with device-private options. May be NULL.\n *                         The same options must be passed later to avformat_write_header() for output\n *                         devices or avformat_open_input() for input devices, or at any other place\n *                         that affects device-private options.\n * @param[out] device_list list of autodetected devices\n * @return count of autodetected devices, negative on error.\n * @note device argument takes precedence over device_name when both are set.\n */\nint avdevice_list_input_sources(struct AVInputFormat *device, const char *device_name,\n                                AVDictionary *device_options, AVDeviceInfoList **device_list);\nint avdevice_list_output_sinks(struct AVOutputFormat *device, const char *device_name,\n                               AVDictionary *device_options, AVDeviceInfoList **device_list);\n\n#endif /* AVDEVICE_AVDEVICE_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavdevice/version.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVDEVICE_VERSION_H\n#define AVDEVICE_VERSION_H\n\n/**\n * @file\n * @ingroup lavd\n * Libavdevice version macros\n */\n\n#include \"libavutil/version.h\"\n\n#define LIBAVDEVICE_VERSION_MAJOR 56\n#define LIBAVDEVICE_VERSION_MINOR  4\n#define LIBAVDEVICE_VERSION_MICRO 100\n\n#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \\\n                                               LIBAVDEVICE_VERSION_MINOR, \\\n                                               LIBAVDEVICE_VERSION_MICRO)\n#define LIBAVDEVICE_VERSION     AV_VERSION(LIBAVDEVICE_VERSION_MAJOR, \\\n                                           LIBAVDEVICE_VERSION_MINOR, \\\n                                           LIBAVDEVICE_VERSION_MICRO)\n#define LIBAVDEVICE_BUILD       LIBAVDEVICE_VERSION_INT\n\n#define LIBAVDEVICE_IDENT       \"Lavd\" AV_STRINGIFY(LIBAVDEVICE_VERSION)\n\n/**\n * FF_API_* defines may be placed below to indicate public API that will be\n * dropped at a future version bump. The defines themselves are not part of\n * the public API and may change, break or disappear at any time.\n */\n\n#endif /* AVDEVICE_VERSION_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavfilter/asrc_abuffer.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFILTER_ASRC_ABUFFER_H\n#define AVFILTER_ASRC_ABUFFER_H\n\n#include \"avfilter.h\"\n\n/**\n * @file\n * memory buffer source for audio\n *\n * @deprecated use buffersrc.h instead.\n */\n\n/**\n * Queue an audio buffer to the audio buffer source.\n *\n * @param abuffersrc audio source buffer context\n * @param data pointers to the samples planes\n * @param linesize linesizes of each audio buffer plane\n * @param nb_samples number of samples per channel\n * @param sample_fmt sample format of the audio data\n * @param ch_layout channel layout of the audio data\n * @param planar flag to indicate if audio data is planar or packed\n * @param pts presentation timestamp of the audio buffer\n * @param flags unused\n *\n * @deprecated use av_buffersrc_add_ref() instead.\n */\nattribute_deprecated\nint av_asrc_buffer_add_samples(AVFilterContext *abuffersrc,\n                               uint8_t *data[8], int linesize[8],\n                               int nb_samples, int sample_rate,\n                               int sample_fmt, int64_t ch_layout, int planar,\n                               int64_t pts, int av_unused flags);\n\n/**\n * Queue an audio buffer to the audio buffer 
source.\n *\n * This is similar to av_asrc_buffer_add_samples(), but the samples\n * are stored in a buffer with known size.\n *\n * @param abuffersrc audio source buffer context\n * @param buf pointer to the samples data, packed is assumed\n * @param size the size in bytes of the buffer, it must contain an\n * integer number of samples\n * @param sample_fmt sample format of the audio data\n * @param ch_layout channel layout of the audio data\n * @param pts presentation timestamp of the audio buffer\n * @param flags unused\n *\n * @deprecated use av_buffersrc_add_ref() instead.\n */\nattribute_deprecated\nint av_asrc_buffer_add_buffer(AVFilterContext *abuffersrc,\n                              uint8_t *buf, int buf_size,\n                              int sample_rate,\n                              int sample_fmt, int64_t ch_layout, int planar,\n                              int64_t pts, int av_unused flags);\n\n/**\n * Queue an audio buffer to the audio buffer source.\n *\n * @param abuffersrc audio source buffer context\n * @param samplesref buffer ref to queue\n * @param flags unused\n *\n * @deprecated use av_buffersrc_add_ref() instead.\n */\nattribute_deprecated\nint av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *abuffersrc,\n                                        AVFilterBufferRef *samplesref,\n                                        int av_unused flags);\n\n#endif /* AVFILTER_ASRC_ABUFFER_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavfilter/avcodec.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFILTER_AVCODEC_H\n#define AVFILTER_AVCODEC_H\n\n/**\n * @file\n * libavcodec/libavfilter gluing utilities\n *\n * This should be included in an application ONLY if the installed\n * libavfilter has been compiled with libavcodec support, otherwise\n * symbols defined below will not be available.\n */\n\n#include \"avfilter.h\"\n\n#if FF_API_AVFILTERBUFFER\n/**\n * Create and return a picref reference from the data and properties\n * contained in frame.\n *\n * @param perms permissions to assign to the new buffer reference\n * @deprecated avfilter APIs work natively with AVFrame instead.\n */\nattribute_deprecated\nAVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, int perms);\n\n\n/**\n * Create and return a picref reference from the data and properties\n * contained in frame.\n *\n * @param perms permissions to assign to the new buffer reference\n * @deprecated avfilter APIs work natively with AVFrame instead.\n */\nattribute_deprecated\nAVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame,\n                                                            int perms);\n\n/**\n * Create and return a buffer reference from 
the data and properties\n * contained in frame.\n *\n * @param perms permissions to assign to the new buffer reference\n * @deprecated avfilter APIs work natively with AVFrame instead.\n */\nattribute_deprecated\nAVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type,\n                                                      const AVFrame *frame,\n                                                      int perms);\n#endif\n\n#endif /* AVFILTER_AVCODEC_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavfilter/avfilter.h",
    "content": "/*\n * filter layer\n * Copyright (c) 2007 Bobby Bingham\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFILTER_AVFILTER_H\n#define AVFILTER_AVFILTER_H\n\n/**\n * @file\n * @ingroup lavfi\n * Main libavfilter public API header\n */\n\n/**\n * @defgroup lavfi Libavfilter - graph-based frame editing library\n * @{\n */\n\n#include <stddef.h>\n\n#include \"libavutil/attributes.h\"\n#include \"libavutil/avutil.h\"\n#include \"libavutil/dict.h\"\n#include \"libavutil/frame.h\"\n#include \"libavutil/log.h\"\n#include \"libavutil/samplefmt.h\"\n#include \"libavutil/pixfmt.h\"\n#include \"libavutil/rational.h\"\n\n#include \"libavfilter/version.h\"\n\n/**\n * Return the LIBAVFILTER_VERSION_INT constant.\n */\nunsigned avfilter_version(void);\n\n/**\n * Return the libavfilter build-time configuration.\n */\nconst char *avfilter_configuration(void);\n\n/**\n * Return the libavfilter license.\n */\nconst char *avfilter_license(void);\n\ntypedef struct AVFilterContext AVFilterContext;\ntypedef struct AVFilterLink    AVFilterLink;\ntypedef struct AVFilterPad     AVFilterPad;\ntypedef struct AVFilterFormats AVFilterFormats;\n\n#if FF_API_AVFILTERBUFFER\n/**\n * A reference-counted buffer data type used by the filter system. 
Filters\n * should not store pointers to this structure directly, but instead use the\n * AVFilterBufferRef structure below.\n */\ntypedef struct AVFilterBuffer {\n    uint8_t *data[8];           ///< buffer data for each plane/channel\n\n    /**\n     * pointers to the data planes/channels.\n     *\n     * For video, this should simply point to data[].\n     *\n     * For planar audio, each channel has a separate data pointer, and\n     * linesize[0] contains the size of each channel buffer.\n     * For packed audio, there is just one data pointer, and linesize[0]\n     * contains the total size of the buffer for all channels.\n     *\n     * Note: Both data and extended_data will always be set, but for planar\n     * audio with more channels that can fit in data, extended_data must be used\n     * in order to access all channels.\n     */\n    uint8_t **extended_data;\n    int linesize[8];            ///< number of bytes per line\n\n    /** private data to be used by a custom free function */\n    void *priv;\n    /**\n     * A pointer to the function to deallocate this buffer if the default\n     * function is not sufficient. 
This could, for example, add the memory\n     * back into a memory pool to be reused later without the overhead of\n     * reallocating it from scratch.\n     */\n    void (*free)(struct AVFilterBuffer *buf);\n\n    int format;                 ///< media format\n    int w, h;                   ///< width and height of the allocated buffer\n    unsigned refcount;          ///< number of references to this buffer\n} AVFilterBuffer;\n\n#define AV_PERM_READ     0x01   ///< can read from the buffer\n#define AV_PERM_WRITE    0x02   ///< can write to the buffer\n#define AV_PERM_PRESERVE 0x04   ///< nobody else can overwrite the buffer\n#define AV_PERM_REUSE    0x08   ///< can output the buffer multiple times, with the same contents each time\n#define AV_PERM_REUSE2   0x10   ///< can output the buffer multiple times, modified each time\n#define AV_PERM_NEG_LINESIZES 0x20  ///< the buffer requested can have negative linesizes\n#define AV_PERM_ALIGN    0x40   ///< the buffer must be aligned\n\n#define AVFILTER_ALIGN 16 //not part of ABI\n\n/**\n * Audio specific properties in a reference to an AVFilterBuffer. Since\n * AVFilterBufferRef is common to different media formats, audio specific\n * per reference properties must be separated out.\n */\ntypedef struct AVFilterBufferRefAudioProps {\n    uint64_t channel_layout;    ///< channel layout of audio buffer\n    int nb_samples;             ///< number of audio samples per channel\n    int sample_rate;            ///< audio buffer sample rate\n    int channels;               ///< number of channels (do not access directly)\n} AVFilterBufferRefAudioProps;\n\n/**\n * Video specific properties in a reference to an AVFilterBuffer. 
Since\n * AVFilterBufferRef is common to different media formats, video specific\n * per reference properties must be separated out.\n */\ntypedef struct AVFilterBufferRefVideoProps {\n    int w;                      ///< image width\n    int h;                      ///< image height\n    AVRational sample_aspect_ratio; ///< sample aspect ratio\n    int interlaced;             ///< is frame interlaced\n    int top_field_first;        ///< field order\n    enum AVPictureType pict_type; ///< picture type of the frame\n    int key_frame;              ///< 1 -> keyframe, 0-> not\n    int qp_table_linesize;                ///< qp_table stride\n    int qp_table_size;            ///< qp_table size\n    int8_t *qp_table;             ///< array of Quantization Parameters\n} AVFilterBufferRefVideoProps;\n\n/**\n * A reference to an AVFilterBuffer. Since filters can manipulate the origin of\n * a buffer to, for example, crop image without any memcpy, the buffer origin\n * and dimensions are per-reference properties. 
Linesize is also useful for\n * image flipping, frame to field filters, etc, and so is also per-reference.\n *\n * TODO: add anything necessary for frame reordering\n */\ntypedef struct AVFilterBufferRef {\n    AVFilterBuffer *buf;        ///< the buffer that this is a reference to\n    uint8_t *data[8];           ///< picture/audio data for each plane\n    /**\n     * pointers to the data planes/channels.\n     *\n     * For video, this should simply point to data[].\n     *\n     * For planar audio, each channel has a separate data pointer, and\n     * linesize[0] contains the size of each channel buffer.\n     * For packed audio, there is just one data pointer, and linesize[0]\n     * contains the total size of the buffer for all channels.\n     *\n     * Note: Both data and extended_data will always be set, but for planar\n     * audio with more channels that can fit in data, extended_data must be used\n     * in order to access all channels.\n     */\n    uint8_t **extended_data;\n    int linesize[8];            ///< number of bytes per line\n\n    AVFilterBufferRefVideoProps *video; ///< video buffer specific properties\n    AVFilterBufferRefAudioProps *audio; ///< audio buffer specific properties\n\n    /**\n     * presentation timestamp. 
The time unit may change during\n     * filtering, as it is specified in the link and the filter code\n     * may need to rescale the PTS accordingly.\n     */\n    int64_t pts;\n    int64_t pos;                ///< byte position in stream, -1 if unknown\n\n    int format;                 ///< media format\n\n    int perms;                  ///< permissions, see the AV_PERM_* flags\n\n    enum AVMediaType type;      ///< media type of buffer data\n\n    AVDictionary *metadata;     ///< dictionary containing metadata key=value tags\n} AVFilterBufferRef;\n\n/**\n * Copy properties of src to dst, without copying the actual data\n */\nattribute_deprecated\nvoid avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, const AVFilterBufferRef *src);\n\n/**\n * Add a new reference to a buffer.\n *\n * @param ref   an existing reference to the buffer\n * @param pmask a bitmask containing the allowable permissions in the new\n *              reference\n * @return      a new reference to the buffer with the same properties as the\n *              old, excluding any permissions denied by pmask\n */\nattribute_deprecated\nAVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask);\n\n/**\n * Remove a reference to a buffer. 
If this is the last reference to the\n * buffer, the buffer itself is also automatically freed.\n *\n * @param ref reference to the buffer, may be NULL\n *\n * @note it is recommended to use avfilter_unref_bufferp() instead of this\n * function\n */\nattribute_deprecated\nvoid avfilter_unref_buffer(AVFilterBufferRef *ref);\n\n/**\n * Remove a reference to a buffer and set the pointer to NULL.\n * If this is the last reference to the buffer, the buffer itself\n * is also automatically freed.\n *\n * @param ref pointer to the buffer reference\n */\nattribute_deprecated\nvoid avfilter_unref_bufferp(AVFilterBufferRef **ref);\n#endif\n\n/**\n * Get the number of channels of a buffer reference.\n */\nattribute_deprecated\nint avfilter_ref_get_channels(AVFilterBufferRef *ref);\n\n#if FF_API_AVFILTERPAD_PUBLIC\n/**\n * A filter pad used for either input or output.\n *\n * See doc/filter_design.txt for details on how to implement the methods.\n *\n * @warning this struct might be removed from public API.\n * users should call avfilter_pad_get_name() and avfilter_pad_get_type()\n * to access the name and type fields; there should be no need to access\n * any other fields from outside of libavfilter.\n */\nstruct AVFilterPad {\n    /**\n     * Pad name. The name is unique among inputs and among outputs, but an\n     * input may have the same name as an output. This may be NULL if this\n     * pad has no need to ever be referenced by name.\n     */\n    const char *name;\n\n    /**\n     * AVFilterPad type.\n     */\n    enum AVMediaType type;\n\n    /**\n     * Input pads:\n     * Minimum required permissions on incoming buffers. Any buffer with\n     * insufficient permissions will be automatically copied by the filter\n     * system to a new buffer which provides the needed access permissions.\n     *\n     * Output pads:\n     * Guaranteed permissions on outgoing buffers. 
Any buffer pushed on the\n     * link must have at least these permissions; this fact is checked by\n     * asserts. It can be used to optimize buffer allocation.\n     */\n    attribute_deprecated int min_perms;\n\n    /**\n     * Input pads:\n     * Permissions which are not accepted on incoming buffers. Any buffer\n     * which has any of these permissions set will be automatically copied\n     * by the filter system to a new buffer which does not have those\n     * permissions. This can be used to easily disallow buffers with\n     * AV_PERM_REUSE.\n     *\n     * Output pads:\n     * Permissions which are automatically removed on outgoing buffers. It\n     * can be used to optimize buffer allocation.\n     */\n    attribute_deprecated int rej_perms;\n\n    /**\n     * @deprecated unused\n     */\n    int (*start_frame)(AVFilterLink *link, AVFilterBufferRef *picref);\n\n    /**\n     * Callback function to get a video buffer. If NULL, the filter system will\n     * use ff_default_get_video_buffer().\n     *\n     * Input video pads only.\n     */\n    AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h);\n\n    /**\n     * Callback function to get an audio buffer. If NULL, the filter system will\n     * use ff_default_get_audio_buffer().\n     *\n     * Input audio pads only.\n     */\n    AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples);\n\n    /**\n     * @deprecated unused\n     */\n    int (*end_frame)(AVFilterLink *link);\n\n    /**\n     * @deprecated unused\n     */\n    int (*draw_slice)(AVFilterLink *link, int y, int height, int slice_dir);\n\n    /**\n     * Filtering callback. This is where a filter receives a frame with\n     * audio/video data and should do its processing.\n     *\n     * Input pads only.\n     *\n     * @return >= 0 on success, a negative AVERROR on error. 
This function\n     * must ensure that frame is properly unreferenced on error if it\n     * hasn't been passed on to another filter.\n     */\n    int (*filter_frame)(AVFilterLink *link, AVFrame *frame);\n\n    /**\n     * Frame poll callback. This returns the number of immediately available\n     * samples. It should return a positive value if the next request_frame()\n     * is guaranteed to return one frame (with no delay).\n     *\n     * Defaults to just calling the source poll_frame() method.\n     *\n     * Output pads only.\n     */\n    int (*poll_frame)(AVFilterLink *link);\n\n    /**\n     * Frame request callback. A call to this should result in at least one\n     * frame being output over the given link. This should return zero on\n     * success, and another value on error.\n     * See ff_request_frame() for the error codes with a specific\n     * meaning.\n     *\n     * Output pads only.\n     */\n    int (*request_frame)(AVFilterLink *link);\n\n    /**\n     * Link configuration callback.\n     *\n     * For output pads, this should set the following link properties:\n     * video: width, height, sample_aspect_ratio, time_base\n     * audio: sample_rate.\n     *\n     * This should NOT set properties such as format, channel_layout, etc which\n     * are negotiated between filters by the filter system using the\n     * query_formats() callback before this function is called.\n     *\n     * For input pads, this should check the properties of the link, and update\n     * the filter's internal state as necessary.\n     *\n     * For both input and output pads, this should return zero on success,\n     * and another value on error.\n     */\n    int (*config_props)(AVFilterLink *link);\n\n    /**\n     * The filter expects a fifo to be inserted on its input link,\n     * typically because it has a delay.\n     *\n     * input pads only.\n     */\n    int needs_fifo;\n\n    /**\n     * The filter expects writable frames from its input link,\n     * 
duplicating data buffers if needed.\n     *\n     * input pads only.\n     */\n    int needs_writable;\n};\n#endif\n\n/**\n * Get the number of elements in a NULL-terminated array of AVFilterPads (e.g.\n * AVFilter.inputs/outputs).\n */\nint avfilter_pad_count(const AVFilterPad *pads);\n\n/**\n * Get the name of an AVFilterPad.\n *\n * @param pads an array of AVFilterPads\n * @param pad_idx index of the pad in the array it; is the caller's\n *                responsibility to ensure the index is valid\n *\n * @return name of the pad_idx'th pad in pads\n */\nconst char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx);\n\n/**\n * Get the type of an AVFilterPad.\n *\n * @param pads an array of AVFilterPads\n * @param pad_idx index of the pad in the array; it is the caller's\n *                responsibility to ensure the index is valid\n *\n * @return type of the pad_idx'th pad in pads\n */\nenum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx);\n\n/**\n * The number of the filter inputs is not determined just by AVFilter.inputs.\n * The filter might add additional inputs during initialization depending on the\n * options supplied to it.\n */\n#define AVFILTER_FLAG_DYNAMIC_INPUTS        (1 << 0)\n/**\n * The number of the filter outputs is not determined just by AVFilter.outputs.\n * The filter might add additional outputs during initialization depending on\n * the options supplied to it.\n */\n#define AVFILTER_FLAG_DYNAMIC_OUTPUTS       (1 << 1)\n/**\n * The filter supports multithreading by splitting frames into multiple parts\n * and processing them concurrently.\n */\n#define AVFILTER_FLAG_SLICE_THREADS         (1 << 2)\n/**\n * Some filters support a generic \"enable\" expression option that can be used\n * to enable or disable a filter in the timeline. Filters supporting this\n * option have this flag set. 
When the enable expression is false, the default\n * no-op filter_frame() function is called in place of the filter_frame()\n * callback defined on each input pad, thus the frame is passed unchanged to\n * the next filters.\n */\n#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC  (1 << 16)\n/**\n * Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will\n * have its filter_frame() callback(s) called as usual even when the enable\n * expression is false. The filter will disable filtering within the\n * filter_frame() callback(s) itself, for example executing code depending on\n * the AVFilterContext->is_disabled value.\n */\n#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL (1 << 17)\n/**\n * Handy mask to test whether the filter supports or no the timeline feature\n * (internally or generically).\n */\n#define AVFILTER_FLAG_SUPPORT_TIMELINE (AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL)\n\n/**\n * Filter definition. This defines the pads a filter contains, and all the\n * callback functions used to interact with the filter.\n */\ntypedef struct AVFilter {\n    /**\n     * Filter name. Must be non-NULL and unique among filters.\n     */\n    const char *name;\n\n    /**\n     * A description of the filter. May be NULL.\n     *\n     * You should use the NULL_IF_CONFIG_SMALL() macro to define it.\n     */\n    const char *description;\n\n    /**\n     * List of inputs, terminated by a zeroed element.\n     *\n     * NULL if there are no (static) inputs. Instances of filters with\n     * AVFILTER_FLAG_DYNAMIC_INPUTS set may have more inputs than present in\n     * this list.\n     */\n    const AVFilterPad *inputs;\n    /**\n     * List of outputs, terminated by a zeroed element.\n     *\n     * NULL if there are no (static) outputs. 
Instances of filters with\n     * AVFILTER_FLAG_DYNAMIC_OUTPUTS set may have more outputs than present in\n     * this list.\n     */\n    const AVFilterPad *outputs;\n\n    /**\n     * A class for the private data, used to declare filter private AVOptions.\n     * This field is NULL for filters that do not declare any options.\n     *\n     * If this field is non-NULL, the first member of the filter private data\n     * must be a pointer to AVClass, which will be set by libavfilter generic\n     * code to this class.\n     */\n    const AVClass *priv_class;\n\n    /**\n     * A combination of AVFILTER_FLAG_*\n     */\n    int flags;\n\n    /*****************************************************************\n     * All fields below this line are not part of the public API. They\n     * may not be used outside of libavfilter and can be changed and\n     * removed at will.\n     * New public fields should be added right above.\n     *****************************************************************\n     */\n\n    /**\n     * Filter initialization function.\n     *\n     * This callback will be called only once during the filter lifetime, after\n     * all the options have been set, but before links between filters are\n     * established and format negotiation is done.\n     *\n     * Basic filter initialization should be done here. Filters with dynamic\n     * inputs and/or outputs should create those inputs/outputs here based on\n     * provided options. 
No more changes to this filter's inputs/outputs can be\n     * done after this callback.\n     *\n     * This callback must not assume that the filter links exist or frame\n     * parameters are known.\n     *\n     * @ref AVFilter.uninit \"uninit\" is guaranteed to be called even if\n     * initialization fails, so this callback does not have to clean up on\n     * failure.\n     *\n     * @return 0 on success, a negative AVERROR on failure\n     */\n    int (*init)(AVFilterContext *ctx);\n\n    /**\n     * Should be set instead of @ref AVFilter.init \"init\" by the filters that\n     * want to pass a dictionary of AVOptions to nested contexts that are\n     * allocated during init.\n     *\n     * On return, the options dict should be freed and replaced with one that\n     * contains all the options which could not be processed by this filter (or\n     * with NULL if all the options were processed).\n     *\n     * Otherwise the semantics is the same as for @ref AVFilter.init \"init\".\n     */\n    int (*init_dict)(AVFilterContext *ctx, AVDictionary **options);\n\n    /**\n     * Filter uninitialization function.\n     *\n     * Called only once right before the filter is freed. Should deallocate any\n     * memory held by the filter, release any buffer references, etc. It does\n     * not need to deallocate the AVFilterContext.priv memory itself.\n     *\n     * This callback may be called even if @ref AVFilter.init \"init\" was not\n     * called or failed, so it must be prepared to handle such a situation.\n     */\n    void (*uninit)(AVFilterContext *ctx);\n\n    /**\n     * Query formats supported by the filter on its inputs and outputs.\n     *\n     * This callback is called after the filter is initialized (so the inputs\n     * and outputs are fixed), shortly before the format negotiation. 
This\n     * callback may be called more than once.\n     *\n     * This callback must set AVFilterLink.out_formats on every input link and\n     * AVFilterLink.in_formats on every output link to a list of pixel/sample\n     * formats that the filter supports on that link. For audio links, this\n     * filter must also set @ref AVFilterLink.in_samplerates \"in_samplerates\" /\n     * @ref AVFilterLink.out_samplerates \"out_samplerates\" and\n     * @ref AVFilterLink.in_channel_layouts \"in_channel_layouts\" /\n     * @ref AVFilterLink.out_channel_layouts \"out_channel_layouts\" analogously.\n     *\n     * This callback may be NULL for filters with one input, in which case\n     * libavfilter assumes that it supports all input formats and preserves\n     * them on output.\n     *\n     * @return zero on success, a negative value corresponding to an\n     * AVERROR code otherwise\n     */\n    int (*query_formats)(AVFilterContext *);\n\n    int priv_size;      ///< size of private data to allocate for the filter\n\n    /**\n     * Used by the filter registration system. Must not be touched by any other\n     * code.\n     */\n    struct AVFilter *next;\n\n    /**\n     * Make the filter instance process a command.\n     *\n     * @param cmd    the command to process, for handling simplicity all commands must be alphanumeric only\n     * @param arg    the argument for the command\n     * @param res    a buffer with size res_size where the filter(s) can return a response. 
This must not change when the command is not supported.\n     * @param flags  if AVFILTER_CMD_FLAG_FAST is set and the command would be\n     *               time consuming then a filter should treat it like an unsupported command\n     *\n     * @returns >=0 on success otherwise an error code.\n     *          AVERROR(ENOSYS) on unsupported commands\n     */\n    int (*process_command)(AVFilterContext *, const char *cmd, const char *arg, char *res, int res_len, int flags);\n\n    /**\n     * Filter initialization function, alternative to the init()\n     * callback. Args contains the user-supplied parameters, opaque is\n     * used for providing binary data.\n     */\n    int (*init_opaque)(AVFilterContext *ctx, void *opaque);\n} AVFilter;\n\n/**\n * Process multiple parts of the frame concurrently.\n */\n#define AVFILTER_THREAD_SLICE (1 << 0)\n\ntypedef struct AVFilterInternal AVFilterInternal;\n\n/** An instance of a filter */\nstruct AVFilterContext {\n    const AVClass *av_class;        ///< needed for av_log() and filters common options\n\n    const AVFilter *filter;         ///< the AVFilter of which this is an instance\n\n    char *name;                     ///< name of this filter instance\n\n    AVFilterPad   *input_pads;      ///< array of input pads\n    AVFilterLink **inputs;          ///< array of pointers to input links\n#if FF_API_FOO_COUNT\n    attribute_deprecated unsigned input_count; ///< @deprecated use nb_inputs\n#endif\n    unsigned    nb_inputs;          ///< number of input pads\n\n    AVFilterPad   *output_pads;     ///< array of output pads\n    AVFilterLink **outputs;         ///< array of pointers to output links\n#if FF_API_FOO_COUNT\n    attribute_deprecated unsigned output_count; ///< @deprecated use nb_outputs\n#endif\n    unsigned    nb_outputs;         ///< number of output pads\n\n    void *priv;                     ///< private data for use by the filter\n\n    struct AVFilterGraph *graph;    ///< filtergraph this filter belongs 
to\n\n    /**\n     * Type of multithreading being allowed/used. A combination of\n     * AVFILTER_THREAD_* flags.\n     *\n     * May be set by the caller before initializing the filter to forbid some\n     * or all kinds of multithreading for this filter. The default is allowing\n     * everything.\n     *\n     * When the filter is initialized, this field is combined using bit AND with\n     * AVFilterGraph.thread_type to get the final mask used for determining\n     * allowed threading types. I.e. a threading type needs to be set in both\n     * to be allowed.\n     *\n     * After the filter is initialized, libavfilter sets this field to the\n     * threading type that is actually used (0 for no multithreading).\n     */\n    int thread_type;\n\n    /**\n     * An opaque struct for libavfilter internal use.\n     */\n    AVFilterInternal *internal;\n\n    struct AVFilterCommand *command_queue;\n\n    char *enable_str;               ///< enable expression string\n    void *enable;                   ///< parsed expression (AVExpr*)\n    double *var_values;             ///< variable values for the enable expression\n    int is_disabled;                ///< the enabled state from the last expression evaluation\n};\n\n/**\n * A link between two filters. This contains pointers to the source and\n * destination filters between which this link exists, and the indexes of\n * the pads involved. 
In addition, this link also contains the parameters\n * which have been negotiated and agreed upon between the filter, such as\n * image dimensions, format, etc.\n */\nstruct AVFilterLink {\n    AVFilterContext *src;       ///< source filter\n    AVFilterPad *srcpad;        ///< output pad on the source filter\n\n    AVFilterContext *dst;       ///< dest filter\n    AVFilterPad *dstpad;        ///< input pad on the dest filter\n\n    enum AVMediaType type;      ///< filter media type\n\n    /* These parameters apply only to video */\n    int w;                      ///< agreed upon image width\n    int h;                      ///< agreed upon image height\n    AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio\n    /* These parameters apply only to audio */\n    uint64_t channel_layout;    ///< channel layout of current buffer (see libavutil/channel_layout.h)\n    int sample_rate;            ///< samples per second\n\n    int format;                 ///< agreed upon media format\n\n    /**\n     * Define the time base used by the PTS of the frames/samples\n     * which will pass through this link.\n     * During the configuration stage, each filter is supposed to\n     * change only the output timebase, while the timebase of the\n     * input link is assumed to be an unchangeable property.\n     */\n    AVRational time_base;\n\n    /*****************************************************************\n     * All fields below this line are not part of the public API. They\n     * may not be used outside of libavfilter and can be changed and\n     * removed at will.\n     * New public fields should be added right above.\n     *****************************************************************\n     */\n    /**\n     * Lists of formats and channel layouts supported by the input and output\n     * filters respectively. 
These lists are used for negotiating the format\n     * to actually be used, which will be loaded into the format and\n     * channel_layout members, above, when chosen.\n     *\n     */\n    AVFilterFormats *in_formats;\n    AVFilterFormats *out_formats;\n\n    /**\n     * Lists of channel layouts and sample rates used for automatic\n     * negotiation.\n     */\n    AVFilterFormats  *in_samplerates;\n    AVFilterFormats *out_samplerates;\n    struct AVFilterChannelLayouts  *in_channel_layouts;\n    struct AVFilterChannelLayouts *out_channel_layouts;\n\n    /**\n     * Audio only, the destination filter sets this to a non-zero value to\n     * request that buffers with the given number of samples should be sent to\n     * it. AVFilterPad.needs_fifo must also be set on the corresponding input\n     * pad.\n     * Last buffer before EOF will be padded with silence.\n     */\n    int request_samples;\n\n    /** stage of the initialization of the link properties (dimensions, etc) */\n    enum {\n        AVLINK_UNINIT = 0,      ///< not started\n        AVLINK_STARTINIT,       ///< started, but incomplete\n        AVLINK_INIT             ///< complete\n    } init_state;\n\n    struct AVFilterPool *pool;\n\n    /**\n     * Graph the filter belongs to.\n     */\n    struct AVFilterGraph *graph;\n\n    /**\n     * Current timestamp of the link, as defined by the most recent\n     * frame(s), in AV_TIME_BASE units.\n     */\n    int64_t current_pts;\n\n    /**\n     * Index in the age array.\n     */\n    int age_index;\n\n    /**\n     * Frame rate of the stream on the link, or 1/0 if unknown;\n     * if left to 0/0, will be automatically be copied from the first input\n     * of the source filter if it exists.\n     *\n     * Sources should set it to the best estimation of the real frame rate.\n     * Filters should update it if necessary depending on their function.\n     * Sinks can use it to set a default output frame rate.\n     * It is similar to the r_frame_rate 
field in AVStream.\n     */\n    AVRational frame_rate;\n\n    /**\n     * Buffer partially filled with samples to achieve a fixed/minimum size.\n     */\n    AVFrame *partial_buf;\n\n    /**\n     * Size of the partial buffer to allocate.\n     * Must be between min_samples and max_samples.\n     */\n    int partial_buf_size;\n\n    /**\n     * Minimum number of samples to filter at once. If filter_frame() is\n     * called with fewer samples, it will accumulate them in partial_buf.\n     * This field and the related ones must not be changed after filtering\n     * has started.\n     * If 0, all related fields are ignored.\n     */\n    int min_samples;\n\n    /**\n     * Maximum number of samples to filter at once. If filter_frame() is\n     * called with more samples, it will split them.\n     */\n    int max_samples;\n\n    /**\n     * The buffer reference currently being received across the link by the\n     * destination filter. This is used internally by the filter system to\n     * allow automatic copying of buffers which do not have sufficient\n     * permissions for the destination. 
This should not be accessed directly\n     * by the filters.\n     */\n    AVFilterBufferRef *cur_buf_copy;\n\n    /**\n     * True if the link is closed.\n     * If set, all attempts of start_frame, filter_frame or request_frame\n     * will fail with AVERROR_EOF, and if necessary the reference will be\n     * destroyed.\n     * If request_frame returns AVERROR_EOF, this flag is set on the\n     * corresponding link.\n     * It can be set also be set by either the source or the destination\n     * filter.\n     */\n    int closed;\n\n    /**\n     * Number of channels.\n     */\n    int channels;\n\n    /**\n     * True if a frame is being requested on the link.\n     * Used internally by the framework.\n     */\n    unsigned frame_requested;\n\n    /**\n     * Link processing flags.\n     */\n    unsigned flags;\n\n    /**\n     * Number of past frames sent through the link.\n     */\n    int64_t frame_count;\n};\n\n/**\n * Link two filters together.\n *\n * @param src    the source filter\n * @param srcpad index of the output pad on the source filter\n * @param dst    the destination filter\n * @param dstpad index of the input pad on the destination filter\n * @return       zero on success\n */\nint avfilter_link(AVFilterContext *src, unsigned srcpad,\n                  AVFilterContext *dst, unsigned dstpad);\n\n/**\n * Free the link in *link, and set its pointer to NULL.\n */\nvoid avfilter_link_free(AVFilterLink **link);\n\n/**\n * Get the number of channels of a link.\n */\nint avfilter_link_get_channels(AVFilterLink *link);\n\n/**\n * Set the closed field of a link.\n */\nvoid avfilter_link_set_closed(AVFilterLink *link, int closed);\n\n/**\n * Negotiate the media format, dimensions, etc of all inputs to a filter.\n *\n * @param filter the filter to negotiate the properties for its inputs\n * @return       zero on successful negotiation\n */\nint avfilter_config_links(AVFilterContext *filter);\n\n#if FF_API_AVFILTERBUFFER\n/**\n * Create a buffer reference 
wrapped around an already allocated image\n * buffer.\n *\n * @param data pointers to the planes of the image to reference\n * @param linesize linesizes for the planes of the image to reference\n * @param perms the required access permissions\n * @param w the width of the image specified by the data and linesize arrays\n * @param h the height of the image specified by the data and linesize arrays\n * @param format the pixel format of the image specified by the data and linesize arrays\n */\nattribute_deprecated\nAVFilterBufferRef *\navfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms,\n                                          int w, int h, enum AVPixelFormat format);\n\n/**\n * Create an audio buffer reference wrapped around an already\n * allocated samples buffer.\n *\n * See avfilter_get_audio_buffer_ref_from_arrays_channels() for a version\n * that can handle unknown channel layouts.\n *\n * @param data           pointers to the samples plane buffers\n * @param linesize       linesize for the samples plane buffers\n * @param perms          the required access permissions\n * @param nb_samples     number of samples per channel\n * @param sample_fmt     the format of each sample in the buffer to allocate\n * @param channel_layout the channel layout of the buffer\n */\nattribute_deprecated\nAVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,\n                                                             int linesize,\n                                                             int perms,\n                                                             int nb_samples,\n                                                             enum AVSampleFormat sample_fmt,\n                                                             uint64_t channel_layout);\n/**\n * Create an audio buffer reference wrapped around an already\n * allocated samples buffer.\n *\n * @param data           pointers to the samples 
plane buffers\n * @param linesize       linesize for the samples plane buffers\n * @param perms          the required access permissions\n * @param nb_samples     number of samples per channel\n * @param sample_fmt     the format of each sample in the buffer to allocate\n * @param channels       the number of channels of the buffer\n * @param channel_layout the channel layout of the buffer,\n *                       must be either 0 or consistent with channels\n */\nattribute_deprecated\nAVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data,\n                                                                      int linesize,\n                                                                      int perms,\n                                                                      int nb_samples,\n                                                                      enum AVSampleFormat sample_fmt,\n                                                                      int channels,\n                                                                      uint64_t channel_layout);\n\n#endif\n\n\n#define AVFILTER_CMD_FLAG_ONE   1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically\n#define AVFILTER_CMD_FLAG_FAST  2 ///< Only execute command when its fast (like a video out that supports contrast adjustment in hw)\n\n/**\n * Make the filter instance process a command.\n * It is recommended to use avfilter_graph_send_command().\n */\nint avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags);\n\n/** Initialize the filter system. Register all builtin filters. */\nvoid avfilter_register_all(void);\n\n#if FF_API_OLD_FILTER_REGISTER\n/** Uninitialize the filter system. Unregister all filters. */\nattribute_deprecated\nvoid avfilter_uninit(void);\n#endif\n\n/**\n * Register a filter. 
This is only needed if you plan to use\n * avfilter_get_by_name later to lookup the AVFilter structure by name. A\n * filter can still by instantiated with avfilter_graph_alloc_filter even if it\n * is not registered.\n *\n * @param filter the filter to register\n * @return 0 if the registration was successful, a negative value\n * otherwise\n */\nint avfilter_register(AVFilter *filter);\n\n/**\n * Get a filter definition matching the given name.\n *\n * @param name the filter name to find\n * @return     the filter definition, if any matching one is registered.\n *             NULL if none found.\n */\n#if !FF_API_NOCONST_GET_NAME\nconst\n#endif\nAVFilter *avfilter_get_by_name(const char *name);\n\n/**\n * Iterate over all registered filters.\n * @return If prev is non-NULL, next registered filter after prev or NULL if\n * prev is the last filter. If prev is NULL, return the first registered filter.\n */\nconst AVFilter *avfilter_next(const AVFilter *prev);\n\n#if FF_API_OLD_FILTER_REGISTER\n/**\n * If filter is NULL, returns a pointer to the first registered filter pointer,\n * if filter is non-NULL, returns the next pointer after filter.\n * If the returned pointer points to NULL, the last registered filter\n * was already reached.\n * @deprecated use avfilter_next()\n */\nattribute_deprecated\nAVFilter **av_filter_next(AVFilter **filter);\n#endif\n\n#if FF_API_AVFILTER_OPEN\n/**\n * Create a filter instance.\n *\n * @param filter_ctx put here a pointer to the created filter context\n * on success, NULL on failure\n * @param filter    the filter to create an instance of\n * @param inst_name Name to give to the new instance. 
Can be NULL for none.\n * @return >= 0 in case of success, a negative error code otherwise\n * @deprecated use avfilter_graph_alloc_filter() instead\n */\nattribute_deprecated\nint avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name);\n#endif\n\n\n#if FF_API_AVFILTER_INIT_FILTER\n/**\n * Initialize a filter.\n *\n * @param filter the filter to initialize\n * @param args   A string of parameters to use when initializing the filter.\n *               The format and meaning of this string varies by filter.\n * @param opaque Any extra non-string data needed by the filter. The meaning\n *               of this parameter varies by filter.\n * @return       zero on success\n */\nattribute_deprecated\nint avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque);\n#endif\n\n/**\n * Initialize a filter with the supplied parameters.\n *\n * @param ctx  uninitialized filter context to initialize\n * @param args Options to initialize the filter with. This must be a\n *             ':'-separated list of options in the 'key=value' form.\n *             May be NULL if the options have been set directly using the\n *             AVOptions API or there are no options that need to be set.\n * @return 0 on success, a negative AVERROR on failure\n */\nint avfilter_init_str(AVFilterContext *ctx, const char *args);\n\n/**\n * Initialize a filter with the supplied dictionary of options.\n *\n * @param ctx     uninitialized filter context to initialize\n * @param options An AVDictionary filled with options for this filter. On\n *                return this parameter will be destroyed and replaced with\n *                a dict containing options that were not found. 
This dictionary\n *                must be freed by the caller.\n *                May be NULL, then this function is equivalent to\n *                avfilter_init_str() with the second parameter set to NULL.\n * @return 0 on success, a negative AVERROR on failure\n *\n * @note This function and avfilter_init_str() do essentially the same thing,\n * the difference is in manner in which the options are passed. It is up to the\n * calling code to choose whichever is more preferable. The two functions also\n * behave differently when some of the provided options are not declared as\n * supported by the filter. In such a case, avfilter_init_str() will fail, but\n * this function will leave those extra options in the options AVDictionary and\n * continue as usual.\n */\nint avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options);\n\n/**\n * Free a filter context. This will also remove the filter from its\n * filtergraph's list of filters.\n *\n * @param filter the filter to free\n */\nvoid avfilter_free(AVFilterContext *filter);\n\n/**\n * Insert a filter in the middle of an existing link.\n *\n * @param link the link into which the filter should be inserted\n * @param filt the filter to be inserted\n * @param filt_srcpad_idx the input pad on the filter to connect\n * @param filt_dstpad_idx the output pad on the filter to connect\n * @return     zero on success\n */\nint avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,\n                           unsigned filt_srcpad_idx, unsigned filt_dstpad_idx);\n\n#if FF_API_AVFILTERBUFFER\n/**\n * Copy the frame properties of src to dst, without copying the actual\n * image data.\n *\n * @return 0 on success, a negative number on error.\n */\nattribute_deprecated\nint avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);\n\n/**\n * Copy the frame properties and data pointers of src to dst, without copying\n * the actual data.\n *\n * @return 0 on success, a negative number on error.\n 
*/\nattribute_deprecated\nint avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src);\n#endif\n\n/**\n * @return AVClass for AVFilterContext.\n *\n * @see av_opt_find().\n */\nconst AVClass *avfilter_get_class(void);\n\ntypedef struct AVFilterGraphInternal AVFilterGraphInternal;\n\n/**\n * A function pointer passed to the @ref AVFilterGraph.execute callback to be\n * executed multiple times, possibly in parallel.\n *\n * @param ctx the filter context the job belongs to\n * @param arg an opaque parameter passed through from @ref\n *            AVFilterGraph.execute\n * @param jobnr the index of the job being executed\n * @param nb_jobs the total number of jobs\n *\n * @return 0 on success, a negative AVERROR on error\n */\ntypedef int (avfilter_action_func)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);\n\n/**\n * A function executing multiple jobs, possibly in parallel.\n *\n * @param ctx the filter context to which the jobs belong\n * @param func the function to be called multiple times\n * @param arg the argument to be passed to func\n * @param ret a nb_jobs-sized array to be filled with return values from each\n *            invocation of func\n * @param nb_jobs the number of jobs to execute\n *\n * @return 0 on success, a negative AVERROR on error\n */\ntypedef int (avfilter_execute_func)(AVFilterContext *ctx, avfilter_action_func *func,\n                                    void *arg, int *ret, int nb_jobs);\n\ntypedef struct AVFilterGraph {\n    const AVClass *av_class;\n#if FF_API_FOO_COUNT\n    attribute_deprecated\n    unsigned filter_count_unused;\n#endif\n    AVFilterContext **filters;\n#if !FF_API_FOO_COUNT\n    unsigned nb_filters;\n#endif\n\n    char *scale_sws_opts; ///< sws options to use for the auto-inserted scale filters\n    char *resample_lavr_opts;   ///< libavresample options to use for the auto-inserted resample filters\n#if FF_API_FOO_COUNT\n    unsigned nb_filters;\n#endif\n\n    /**\n     * Type of multithreading 
allowed for filters in this graph. A combination\n     * of AVFILTER_THREAD_* flags.\n     *\n     * May be set by the caller at any point, the setting will apply to all\n     * filters initialized after that. The default is allowing everything.\n     *\n     * When a filter in this graph is initialized, this field is combined using\n     * bit AND with AVFilterContext.thread_type to get the final mask used for\n     * determining allowed threading types. I.e. a threading type needs to be\n     * set in both to be allowed.\n     */\n    int thread_type;\n\n    /**\n     * Maximum number of threads used by filters in this graph. May be set by\n     * the caller before adding any filters to the filtergraph. Zero (the\n     * default) means that the number of threads is determined automatically.\n     */\n    int nb_threads;\n\n    /**\n     * Opaque object for libavfilter internal use.\n     */\n    AVFilterGraphInternal *internal;\n\n    /**\n     * Opaque user data. May be set by the caller to an arbitrary value, e.g. 
to\n     * be used from callbacks like @ref AVFilterGraph.execute.\n     * Libavfilter will not touch this field in any way.\n     */\n    void *opaque;\n\n    /**\n     * This callback may be set by the caller immediately after allocating the\n     * graph and before adding any filters to it, to provide a custom\n     * multithreading implementation.\n     *\n     * If set, filters with slice threading capability will call this callback\n     * to execute multiple jobs in parallel.\n     *\n     * If this field is left unset, libavfilter will use its internal\n     * implementation, which may or may not be multithreaded depending on the\n     * platform and build options.\n     */\n    avfilter_execute_func *execute;\n\n    char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters, Access ONLY through AVOptions\n\n    /**\n     * Private fields\n     *\n     * The following fields are for internal use only.\n     * Their type, offset, number and semantic can change without notice.\n     */\n\n    AVFilterLink **sink_links;\n    int sink_links_count;\n\n    unsigned disable_auto_convert;\n} AVFilterGraph;\n\n/**\n * Allocate a filter graph.\n */\nAVFilterGraph *avfilter_graph_alloc(void);\n\n/**\n * Create a new filter instance in a filter graph.\n *\n * @param graph graph in which the new filter will be used\n * @param filter the filter to create an instance of\n * @param name Name to give to the new instance (will be copied to\n *             AVFilterContext.name). This may be used by the caller to identify\n *             different filters, libavfilter itself assigns no semantics to\n *             this parameter. 
May be NULL.\n *\n * @return the context of the newly created filter instance (note that it is\n *         also retrievable directly through AVFilterGraph.filters or with\n *         avfilter_graph_get_filter()) on success or NULL on failure.\n */\nAVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph,\n                                             const AVFilter *filter,\n                                             const char *name);\n\n/**\n * Get a filter instance identified by instance name from graph.\n *\n * @param graph filter graph to search through.\n * @param name filter instance name (should be unique in the graph).\n * @return the pointer to the found filter instance or NULL if it\n * cannot be found.\n */\nAVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, const char *name);\n\n#if FF_API_AVFILTER_OPEN\n/**\n * Add an existing filter instance to a filter graph.\n *\n * @param graphctx  the filter graph\n * @param filter the filter to be added\n *\n * @deprecated use avfilter_graph_alloc_filter() to allocate a filter in a\n * filter graph\n */\nattribute_deprecated\nint avfilter_graph_add_filter(AVFilterGraph *graphctx, AVFilterContext *filter);\n#endif\n\n/**\n * Create and add a filter instance into an existing graph.\n * The filter instance is created from the filter filt and inited\n * with the parameters args and opaque.\n *\n * In case of success put in *filt_ctx the pointer to the created\n * filter instance, otherwise set *filt_ctx to NULL.\n *\n * @param name the instance name to give to the created filter instance\n * @param graph_ctx the filter graph\n * @return a negative AVERROR error code in case of failure, a non\n * negative value otherwise\n */\nint avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt,\n                                 const char *name, const char *args, void *opaque,\n                                 AVFilterGraph *graph_ctx);\n\n/**\n * Enable or disable automatic 
format conversion inside the graph.\n *\n * Note that format conversion can still happen inside explicitly inserted\n * scale and aresample filters.\n *\n * @param flags  any of the AVFILTER_AUTO_CONVERT_* constants\n */\nvoid avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags);\n\nenum {\n    AVFILTER_AUTO_CONVERT_ALL  =  0, /**< all automatic conversions enabled */\n    AVFILTER_AUTO_CONVERT_NONE = -1, /**< all automatic conversions disabled */\n};\n\n/**\n * Check validity and configure all the links and formats in the graph.\n *\n * @param graphctx the filter graph\n * @param log_ctx context used for logging\n * @return >= 0 in case of success, a negative AVERROR code otherwise\n */\nint avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx);\n\n/**\n * Free a graph, destroy its links, and set *graph to NULL.\n * If *graph is NULL, do nothing.\n */\nvoid avfilter_graph_free(AVFilterGraph **graph);\n\n/**\n * A linked-list of the inputs/outputs of the filter chain.\n *\n * This is mainly useful for avfilter_graph_parse() / avfilter_graph_parse2(),\n * where it is used to communicate open (unlinked) inputs and outputs from and\n * to the caller.\n * This struct specifies, per each not connected pad contained in the graph, the\n * filter context and the pad index required for establishing a link.\n */\ntypedef struct AVFilterInOut {\n    /** unique name for this input/output in the list */\n    char *name;\n\n    /** filter context associated to this input/output */\n    AVFilterContext *filter_ctx;\n\n    /** index of the filt_ctx pad to use for linking */\n    int pad_idx;\n\n    /** next input/input in the list, NULL if this is the last */\n    struct AVFilterInOut *next;\n} AVFilterInOut;\n\n/**\n * Allocate a single AVFilterInOut entry.\n * Must be freed with avfilter_inout_free().\n * @return allocated AVFilterInOut on success, NULL on failure.\n */\nAVFilterInOut *avfilter_inout_alloc(void);\n\n/**\n * Free the supplied list of 
AVFilterInOut and set *inout to NULL.\n * If *inout is NULL, do nothing.\n */\nvoid avfilter_inout_free(AVFilterInOut **inout);\n\n#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI || !FF_API_OLD_GRAPH_PARSE\n/**\n * Add a graph described by a string to a graph.\n *\n * @note The caller must provide the lists of inputs and outputs,\n * which therefore must be known before calling the function.\n *\n * @note The inputs parameter describes inputs of the already existing\n * part of the graph; i.e. from the point of view of the newly created\n * part, they are outputs. Similarly the outputs parameter describes\n * outputs of the already existing filters, which are provided as\n * inputs to the parsed filters.\n *\n * @param graph   the filter graph where to link the parsed graph context\n * @param filters string to be parsed\n * @param inputs  linked list to the inputs of the graph\n * @param outputs linked list to the outputs of the graph\n * @return zero on success, a negative AVERROR code on error\n */\nint avfilter_graph_parse(AVFilterGraph *graph, const char *filters,\n                         AVFilterInOut *inputs, AVFilterInOut *outputs,\n                         void *log_ctx);\n#else\n/**\n * Add a graph described by a string to a graph.\n *\n * @param graph   the filter graph where to link the parsed graph context\n * @param filters string to be parsed\n * @param inputs  pointer to a linked list to the inputs of the graph, may be NULL.\n *                If non-NULL, *inputs is updated to contain the list of open inputs\n *                after the parsing, should be freed with avfilter_inout_free().\n * @param outputs pointer to a linked list to the outputs of the graph, may be NULL.\n *                If non-NULL, *outputs is updated to contain the list of open outputs\n *                after the parsing, should be freed with avfilter_inout_free().\n * @return non negative on success, a negative AVERROR code on error\n * @deprecated Use avfilter_graph_parse_ptr() 
instead.\n */\nattribute_deprecated\nint avfilter_graph_parse(AVFilterGraph *graph, const char *filters,\n                         AVFilterInOut **inputs, AVFilterInOut **outputs,\n                         void *log_ctx);\n#endif\n\n/**\n * Add a graph described by a string to a graph.\n *\n * In the graph filters description, if the input label of the first\n * filter is not specified, \"in\" is assumed; if the output label of\n * the last filter is not specified, \"out\" is assumed.\n *\n * @param graph   the filter graph where to link the parsed graph context\n * @param filters string to be parsed\n * @param inputs  pointer to a linked list to the inputs of the graph, may be NULL.\n *                If non-NULL, *inputs is updated to contain the list of open inputs\n *                after the parsing, should be freed with avfilter_inout_free().\n * @param outputs pointer to a linked list to the outputs of the graph, may be NULL.\n *                If non-NULL, *outputs is updated to contain the list of open outputs\n *                after the parsing, should be freed with avfilter_inout_free().\n * @return non negative on success, a negative AVERROR code on error\n */\nint avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters,\n                             AVFilterInOut **inputs, AVFilterInOut **outputs,\n                             void *log_ctx);\n\n/**\n * Add a graph described by a string to a graph.\n *\n * @param[in]  graph   the filter graph where to link the parsed graph context\n * @param[in]  filters string to be parsed\n * @param[out] inputs  a linked list of all free (unlinked) inputs of the\n *                     parsed graph will be returned here. It is to be freed\n *                     by the caller using avfilter_inout_free().\n * @param[out] outputs a linked list of all free (unlinked) outputs of the\n *                     parsed graph will be returned here. 
It is to be freed by the\n *                     caller using avfilter_inout_free().\n * @return zero on success, a negative AVERROR code on error\n *\n * @note This function returns the inputs and outputs that are left\n * unlinked after parsing the graph and the caller then deals with\n * them.\n * @note This function makes no reference whatsoever to already\n * existing parts of the graph and the inputs parameter will on return\n * contain inputs of the newly parsed part of the graph.  Analogously\n * the outputs parameter will contain outputs of the newly created\n * filters.\n */\nint avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,\n                          AVFilterInOut **inputs,\n                          AVFilterInOut **outputs);\n\n/**\n * Send a command to one or more filter instances.\n *\n * @param graph  the filter graph\n * @param target the filter(s) to which the command should be sent\n *               \"all\" sends to all filters\n *               otherwise it can be a filter or filter instance name\n *               which will send the command to all matching filters.\n * @param cmd    the command to send, for handling simplicity all commands must be alphanumeric only\n * @param arg    the argument for the command\n * @param res    a buffer with size res_size where the filter(s) can return a response.\n *\n * @returns >=0 on success otherwise an error code.\n *              AVERROR(ENOSYS) on unsupported commands\n */\nint avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags);\n\n/**\n * Queue a command for one or more filter instances.\n *\n * @param graph  the filter graph\n * @param target the filter(s) to which the command should be sent\n *               \"all\" sends to all filters\n *               otherwise it can be a filter or filter instance name\n *               which will send the command to all matching filters.\n * @param cmd    
the command to sent, for handling simplicity all commands must be alphanumeric only\n * @param arg    the argument for the command\n * @param ts     time at which the command should be sent to the filter\n *\n * @note As this executes commands after this function returns, no return code\n *       from the filter is provided, also AVFILTER_CMD_FLAG_ONE is not supported.\n */\nint avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts);\n\n\n/**\n * Dump a graph into a human-readable string representation.\n *\n * @param graph    the graph to dump\n * @param options  formatting options; currently ignored\n * @return  a string, or NULL in case of memory allocation failure;\n *          the string must be freed using av_free\n */\nchar *avfilter_graph_dump(AVFilterGraph *graph, const char *options);\n\n/**\n * Request a frame on the oldest sink link.\n *\n * If the request returns AVERROR_EOF, try the next.\n *\n * Note that this function is not meant to be the sole scheduling mechanism\n * of a filtergraph, only a convenience function to help drain a filtergraph\n * in a balanced way under normal circumstances.\n *\n * Also note that AVERROR_EOF does not mean that frames did not arrive on\n * some of the sinks during the process.\n * When there are multiple sink links, in case the requested link\n * returns an EOF, this may cause a filter to flush pending frames\n * which are sent to another sink link, although unrequested.\n *\n * @return  the return value of ff_request_frame(),\n *          or AVERROR_EOF if all links returned AVERROR_EOF\n */\nint avfilter_graph_request_oldest(AVFilterGraph *graph);\n\n/**\n * @}\n */\n\n#endif /* AVFILTER_AVFILTER_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavfilter/avfiltergraph.h",
    "content": "/*\n * Filter graphs\n * copyright (c) 2007 Bobby Bingham\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFILTER_AVFILTERGRAPH_H\n#define AVFILTER_AVFILTERGRAPH_H\n\n#include \"avfilter.h\"\n#include \"libavutil/log.h\"\n\n#endif /* AVFILTER_AVFILTERGRAPH_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavfilter/buffersink.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFILTER_BUFFERSINK_H\n#define AVFILTER_BUFFERSINK_H\n\n/**\n * @file\n * @ingroup lavfi_buffersink\n * memory buffer sink API for audio and video\n */\n\n#include \"avfilter.h\"\n\n/**\n * @defgroup lavfi_buffersink Buffer sink API\n * @ingroup lavfi\n * @{\n */\n\n#if FF_API_AVFILTERBUFFER\n/**\n * Get an audio/video buffer data from buffer_sink and put it in bufref.\n *\n * This function works with both audio and video buffer sinks.\n *\n * @param buffer_sink pointer to a buffersink or abuffersink context\n * @param flags a combination of AV_BUFFERSINK_FLAG_* flags\n * @return >= 0 in case of success, a negative AVERROR code in case of\n * failure\n */\nattribute_deprecated\nint av_buffersink_get_buffer_ref(AVFilterContext *buffer_sink,\n                                 AVFilterBufferRef **bufref, int flags);\n\n/**\n * Get the number of immediately available frames.\n */\nattribute_deprecated\nint av_buffersink_poll_frame(AVFilterContext *ctx);\n\n/**\n * Get a buffer with filtered data from sink and put it in buf.\n *\n * @param ctx pointer to a context of a buffersink or abuffersink AVFilter.\n * @param buf pointer to the buffer will be written here if buf is non-NULL. 
buf\n *            must be freed by the caller using avfilter_unref_buffer().\n *            Buf may also be NULL to query whether a buffer is ready to be\n *            output.\n *\n * @return >= 0 in case of success, a negative AVERROR code in case of\n *         failure.\n */\nattribute_deprecated\nint av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf);\n\n/**\n * Same as av_buffersink_read, but with the ability to specify the number of\n * samples read. This function is less efficient than av_buffersink_read(),\n * because it copies the data around.\n *\n * @param ctx pointer to a context of the abuffersink AVFilter.\n * @param buf pointer to the buffer will be written here if buf is non-NULL. buf\n *            must be freed by the caller using avfilter_unref_buffer(). buf\n *            will contain exactly nb_samples audio samples, except at the end\n *            of stream, when it can contain less than nb_samples.\n *            Buf may also be NULL to query whether a buffer is ready to be\n *            output.\n *\n * @warning do not mix this function with av_buffersink_read(). 
Use only one or\n * the other with a single sink, not both.\n */\nattribute_deprecated\nint av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,\n                               int nb_samples);\n#endif\n\n/**\n * Get a frame with filtered data from sink and put it in frame.\n *\n * @param ctx    pointer to a buffersink or abuffersink filter context.\n * @param frame  pointer to an allocated frame that will be filled with data.\n *               The data must be freed using av_frame_unref() / av_frame_free()\n * @param flags  a combination of AV_BUFFERSINK_FLAG_* flags\n *\n * @return  >= 0 in for success, a negative AVERROR code for failure.\n */\nint av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags);\n\n/**\n * Tell av_buffersink_get_buffer_ref() to read video/samples buffer\n * reference, but not remove it from the buffer. This is useful if you\n * need only to read a video/samples buffer, without to fetch it.\n */\n#define AV_BUFFERSINK_FLAG_PEEK 1\n\n/**\n * Tell av_buffersink_get_buffer_ref() not to request a frame from its input.\n * If a frame is already buffered, it is read (and removed from the buffer),\n * but if no frame is present, return AVERROR(EAGAIN).\n */\n#define AV_BUFFERSINK_FLAG_NO_REQUEST 2\n\n/**\n * Struct to use for initializing a buffersink context.\n */\ntypedef struct {\n    const enum AVPixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by AV_PIX_FMT_NONE\n} AVBufferSinkParams;\n\n/**\n * Create an AVBufferSinkParams structure.\n *\n * Must be freed with av_free().\n */\nAVBufferSinkParams *av_buffersink_params_alloc(void);\n\n/**\n * Struct to use for initializing an abuffersink context.\n */\ntypedef struct {\n    const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE\n    const int64_t *channel_layouts;         ///< list of allowed channel layouts, terminated by -1\n    const int *channel_counts;              
///< list of allowed channel counts, terminated by -1\n    int all_channel_counts;                 ///< if not 0, accept any channel count or layout\n    int *sample_rates;                      ///< list of allowed sample rates, terminated by -1\n} AVABufferSinkParams;\n\n/**\n * Create an AVABufferSinkParams structure.\n *\n * Must be freed with av_free().\n */\nAVABufferSinkParams *av_abuffersink_params_alloc(void);\n\n/**\n * Set the frame size for an audio buffer sink.\n *\n * All calls to av_buffersink_get_buffer_ref will return a buffer with\n * exactly the specified number of samples, or AVERROR(EAGAIN) if there is\n * not enough. The last buffer at EOF will be padded with 0.\n */\nvoid av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size);\n\n/**\n * Get the frame rate of the input.\n */\nAVRational av_buffersink_get_frame_rate(AVFilterContext *ctx);\n\n/**\n * Get a frame with filtered data from sink and put it in frame.\n *\n * @param ctx pointer to a context of a buffersink or abuffersink AVFilter.\n * @param frame pointer to an allocated frame that will be filled with data.\n *              The data must be freed using av_frame_unref() / av_frame_free()\n *\n * @return\n *         - >= 0 if a frame was successfully returned.\n *         - AVERROR(EAGAIN) if no frames are available at this point; more\n *           input frames must be added to the filtergraph to get more output.\n *         - AVERROR_EOF if there will be no more output frames on this sink.\n *         - A different negative AVERROR code in other failure cases.\n */\nint av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame);\n\n/**\n * Same as av_buffersink_get_frame(), but with the ability to specify the number\n * of samples read. 
This function is less efficient than\n * av_buffersink_get_frame(), because it copies the data around.\n *\n * @param ctx pointer to a context of the abuffersink AVFilter.\n * @param frame pointer to an allocated frame that will be filled with data.\n *              The data must be freed using av_frame_unref() / av_frame_free()\n *              frame will contain exactly nb_samples audio samples, except at\n *              the end of stream, when it can contain less than nb_samples.\n *\n * @return The return codes have the same meaning as for\n *         av_buffersink_get_samples().\n *\n * @warning do not mix this function with av_buffersink_get_frame(). Use only one or\n * the other with a single sink, not both.\n */\nint av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples);\n\n/**\n * @}\n */\n\n#endif /* AVFILTER_BUFFERSINK_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavfilter/buffersrc.h",
    "content": "/*\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFILTER_BUFFERSRC_H\n#define AVFILTER_BUFFERSRC_H\n\n/**\n * @file\n * @ingroup lavfi_buffersrc\n * Memory buffer source API.\n */\n\n#include \"libavcodec/avcodec.h\"\n#include \"avfilter.h\"\n\n/**\n * @defgroup lavfi_buffersrc Buffer source API\n * @ingroup lavfi\n * @{\n */\n\nenum {\n\n    /**\n     * Do not check for format changes.\n     */\n    AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1,\n\n#if FF_API_AVFILTERBUFFER\n    /**\n     * Ignored\n     */\n    AV_BUFFERSRC_FLAG_NO_COPY = 2,\n#endif\n\n    /**\n     * Immediately push the frame to the output.\n     */\n    AV_BUFFERSRC_FLAG_PUSH = 4,\n\n    /**\n     * Keep a reference to the frame.\n     * If the frame if reference-counted, create a new reference; otherwise\n     * copy the frame data.\n     */\n    AV_BUFFERSRC_FLAG_KEEP_REF = 8,\n\n};\n\n/**\n * Add buffer data in picref to buffer_src.\n *\n * @param buffer_src  pointer to a buffer source context\n * @param picref      a buffer reference, or NULL to mark EOF\n * @param flags       a combination of AV_BUFFERSRC_FLAG_*\n * @return            >= 0 in case of success, a negative AVERROR code\n *                    in case of failure\n */\nint 
av_buffersrc_add_ref(AVFilterContext *buffer_src,\n                         AVFilterBufferRef *picref, int flags);\n\n/**\n * Get the number of failed requests.\n *\n * A failed request is when the request_frame method is called while no\n * frame is present in the buffer.\n * The number is reset when a frame is added.\n */\nunsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src);\n\n#if FF_API_AVFILTERBUFFER\n/**\n * Add a buffer to a filtergraph.\n *\n * @param ctx an instance of the buffersrc filter\n * @param buf buffer containing frame data to be passed down the filtergraph.\n * This function will take ownership of buf, the user must not free it.\n * A NULL buf signals EOF -- i.e. no more frames will be sent to this filter.\n *\n * @deprecated use av_buffersrc_write_frame() or av_buffersrc_add_frame()\n */\nattribute_deprecated\nint av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf);\n#endif\n\n/**\n * Add a frame to the buffer source.\n *\n * @param ctx   an instance of the buffersrc filter\n * @param frame frame to be added. If the frame is reference counted, this\n * function will make a new reference to it. Otherwise the frame data will be\n * copied.\n *\n * @return 0 on success, a negative AVERROR on error\n *\n * This function is equivalent to av_buffersrc_add_frame_flags() with the\n * AV_BUFFERSRC_FLAG_KEEP_REF flag.\n */\nint av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame);\n\n/**\n * Add a frame to the buffer source.\n *\n * @param ctx   an instance of the buffersrc filter\n * @param frame frame to be added. If the frame is reference counted, this\n * function will take ownership of the reference(s) and reset the frame.\n * Otherwise the frame data will be copied. 
If this function returns an error,\n * the input frame is not touched.\n *\n * @return 0 on success, a negative AVERROR on error.\n *\n * @note the difference between this function and av_buffersrc_write_frame() is\n * that av_buffersrc_write_frame() creates a new reference to the input frame,\n * while this function takes ownership of the reference passed to it.\n *\n * This function is equivalent to av_buffersrc_add_frame_flags() without the\n * AV_BUFFERSRC_FLAG_KEEP_REF flag.\n */\nint av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame);\n\n/**\n * Add a frame to the buffer source.\n *\n * By default, if the frame is reference-counted, this function will take\n * ownership of the reference(s) and reset the frame. This can be controlled\n * using the flags.\n *\n * If this function returns an error, the input frame is not touched.\n *\n * @param buffer_src  pointer to a buffer source context\n * @param frame       a frame, or NULL to mark EOF\n * @param flags       a combination of AV_BUFFERSRC_FLAG_*\n * @return            >= 0 in case of success, a negative AVERROR code\n *                    in case of failure\n */\nint av_buffersrc_add_frame_flags(AVFilterContext *buffer_src,\n                                 AVFrame *frame, int flags);\n\n\n/**\n * @}\n */\n\n#endif /* AVFILTER_BUFFERSRC_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavfilter/version.h",
    "content": "/*\n * Version macros.\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFILTER_VERSION_H\n#define AVFILTER_VERSION_H\n\n/**\n * @file\n * @ingroup lavfi\n * Libavfilter version macros\n */\n\n#include \"libavutil/version.h\"\n\n#define LIBAVFILTER_VERSION_MAJOR  5\n#define LIBAVFILTER_VERSION_MINOR  11\n#define LIBAVFILTER_VERSION_MICRO 102\n\n#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \\\n                                               LIBAVFILTER_VERSION_MINOR, \\\n                                               LIBAVFILTER_VERSION_MICRO)\n#define LIBAVFILTER_VERSION     AV_VERSION(LIBAVFILTER_VERSION_MAJOR,   \\\n                                           LIBAVFILTER_VERSION_MINOR,   \\\n                                           LIBAVFILTER_VERSION_MICRO)\n#define LIBAVFILTER_BUILD       LIBAVFILTER_VERSION_INT\n\n#define LIBAVFILTER_IDENT       \"Lavfi\" AV_STRINGIFY(LIBAVFILTER_VERSION)\n\n/**\n * FF_API_* defines may be placed below to indicate public API that will be\n * dropped at a future version bump. 
The defines themselves are not part of\n * the public API and may change, break or disappear at any time.\n */\n\n#ifndef FF_API_AVFILTERPAD_PUBLIC\n#define FF_API_AVFILTERPAD_PUBLIC           (LIBAVFILTER_VERSION_MAJOR < 6)\n#endif\n#ifndef FF_API_FOO_COUNT\n#define FF_API_FOO_COUNT                    (LIBAVFILTER_VERSION_MAJOR < 6)\n#endif\n#ifndef FF_API_AVFILTERBUFFER\n#define FF_API_AVFILTERBUFFER               (LIBAVFILTER_VERSION_MAJOR < 6)\n#endif\n#ifndef FF_API_OLD_FILTER_OPTS\n#define FF_API_OLD_FILTER_OPTS              (LIBAVFILTER_VERSION_MAJOR < 6)\n#endif\n#ifndef FF_API_AVFILTER_OPEN\n#define FF_API_AVFILTER_OPEN                (LIBAVFILTER_VERSION_MAJOR < 6)\n#endif\n#ifndef FF_API_AVFILTER_INIT_FILTER\n#define FF_API_AVFILTER_INIT_FILTER         (LIBAVFILTER_VERSION_MAJOR < 6)\n#endif\n#ifndef FF_API_OLD_FILTER_REGISTER\n#define FF_API_OLD_FILTER_REGISTER          (LIBAVFILTER_VERSION_MAJOR < 6)\n#endif\n#ifndef FF_API_OLD_GRAPH_PARSE\n#define FF_API_OLD_GRAPH_PARSE              (LIBAVFILTER_VERSION_MAJOR < 5)\n#endif\n#ifndef FF_API_NOCONST_GET_NAME\n#define FF_API_NOCONST_GET_NAME             (LIBAVFILTER_VERSION_MAJOR < 6)\n#endif\n\n#endif /* AVFILTER_VERSION_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavformat/avformat.h",
    "content": "/*\n * copyright (c) 2001 Fabrice Bellard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFORMAT_AVFORMAT_H\n#define AVFORMAT_AVFORMAT_H\n\n/**\n * @file\n * @ingroup libavf\n * Main libavformat public API header\n */\n\n/**\n * @defgroup libavf I/O and Muxing/Demuxing Library\n * @{\n *\n * Libavformat (lavf) is a library for dealing with various media container\n * formats. Its main two purposes are demuxing - i.e. splitting a media file\n * into component streams, and the reverse process of muxing - writing supplied\n * data in a specified container format. It also has an @ref lavf_io\n * \"I/O module\" which supports a number of protocols for accessing the data (e.g.\n * file, tcp, http and others). Before using lavf, you need to call\n * av_register_all() to register all compiled muxers, demuxers and protocols.\n * Unless you are absolutely sure you won't use libavformat's network\n * capabilities, you should also call avformat_network_init().\n *\n * A supported input format is described by an AVInputFormat struct, conversely\n * an output format is described by AVOutputFormat. You can iterate over all\n * registered input/output formats using the av_iformat_next() /\n * av_oformat_next() functions. 
The protocols layer is not part of the public\n * API, so you can only get the names of supported protocols with the\n * avio_enum_protocols() function.\n *\n * Main lavf structure used for both muxing and demuxing is AVFormatContext,\n * which exports all information about the file being read or written. As with\n * most Libavformat structures, its size is not part of public ABI, so it cannot be\n * allocated on stack or directly with av_malloc(). To create an\n * AVFormatContext, use avformat_alloc_context() (some functions, like\n * avformat_open_input() might do that for you).\n *\n * Most importantly an AVFormatContext contains:\n * @li the @ref AVFormatContext.iformat \"input\" or @ref AVFormatContext.oformat\n * \"output\" format. It is either autodetected or set by user for input;\n * always set by user for output.\n * @li an @ref AVFormatContext.streams \"array\" of AVStreams, which describe all\n * elementary streams stored in the file. AVStreams are typically referred to\n * using their index in this array.\n * @li an @ref AVFormatContext.pb \"I/O context\". It is either opened by lavf or\n * set by user for input, always set by user for output (unless you are dealing\n * with an AVFMT_NOFILE format).\n *\n * @section lavf_options Passing options to (de)muxers\n * Lavf allows to configure muxers and demuxers using the @ref avoptions\n * mechanism. Generic (format-independent) libavformat options are provided by\n * AVFormatContext, they can be examined from a user program by calling\n * av_opt_next() / av_opt_find() on an allocated AVFormatContext (or its AVClass\n * from avformat_get_class()). Private (format-specific) options are provided by\n * AVFormatContext.priv_data if and only if AVInputFormat.priv_class /\n * AVOutputFormat.priv_class of the corresponding format struct is non-NULL.\n * Further options may be provided by the @ref AVFormatContext.pb \"I/O context\",\n * if its AVClass is non-NULL, and the protocols layer. 
See the discussion on\n * nesting in @ref avoptions documentation to learn how to access those.\n *\n * @defgroup lavf_decoding Demuxing\n * @{\n * Demuxers read a media file and split it into chunks of data (@em packets). A\n * @ref AVPacket \"packet\" contains one or more encoded frames which belongs to a\n * single elementary stream. In the lavf API this process is represented by the\n * avformat_open_input() function for opening a file, av_read_frame() for\n * reading a single packet and finally avformat_close_input(), which does the\n * cleanup.\n *\n * @section lavf_decoding_open Opening a media file\n * The minimum information required to open a file is its URL or filename, which\n * is passed to avformat_open_input(), as in the following code:\n * @code\n * const char    *url = \"in.mp3\";\n * AVFormatContext *s = NULL;\n * int ret = avformat_open_input(&s, url, NULL, NULL);\n * if (ret < 0)\n *     abort();\n * @endcode\n * The above code attempts to allocate an AVFormatContext, open the\n * specified file (autodetecting the format) and read the header, exporting the\n * information stored there into s. Some formats do not have a header or do not\n * store enough information there, so it is recommended that you call the\n * avformat_find_stream_info() function which tries to read and decode a few\n * frames to find missing information.\n *\n * In some cases you might want to preallocate an AVFormatContext yourself with\n * avformat_alloc_context() and do some tweaking on it before passing it to\n * avformat_open_input(). One such case is when you want to use custom functions\n * for reading input data instead of lavf internal I/O layer.\n * To do that, create your own AVIOContext with avio_alloc_context(), passing\n * your reading callbacks to it. 
Then set the @em pb field of your\n * AVFormatContext to newly created AVIOContext.\n *\n * Since the format of the opened file is in general not known until after\n * avformat_open_input() has returned, it is not possible to set demuxer private\n * options on a preallocated context. Instead, the options should be passed to\n * avformat_open_input() wrapped in an AVDictionary:\n * @code\n * AVDictionary *options = NULL;\n * av_dict_set(&options, \"video_size\", \"640x480\", 0);\n * av_dict_set(&options, \"pixel_format\", \"rgb24\", 0);\n *\n * if (avformat_open_input(&s, url, NULL, &options) < 0)\n *     abort();\n * av_dict_free(&options);\n * @endcode\n * This code passes the private options 'video_size' and 'pixel_format' to the\n * demuxer. They would be necessary for e.g. the rawvideo demuxer, since it\n * cannot know how to interpret raw video data otherwise. If the format turns\n * out to be something different than raw video, those options will not be\n * recognized by the demuxer and therefore will not be applied. Such unrecognized\n * options are then returned in the options dictionary (recognized options are\n * consumed). The calling program can handle such unrecognized options as it\n * wishes, e.g.\n * @code\n * AVDictionaryEntry *e;\n * if (e = av_dict_get(options, \"\", NULL, AV_DICT_IGNORE_SUFFIX)) {\n *     fprintf(stderr, \"Option %s not recognized by the demuxer.\\n\", e->key);\n *     abort();\n * }\n * @endcode\n *\n * After you have finished reading the file, you must close it with\n * avformat_close_input(). It will free everything associated with the file.\n *\n * @section lavf_decoding_read Reading from an opened file\n * Reading data from an opened AVFormatContext is done by repeatedly calling\n * av_read_frame() on it. Each call, if successful, will return an AVPacket\n * containing encoded data for one AVStream, identified by\n * AVPacket.stream_index. 
This packet may be passed straight into the libavcodec\n * decoding functions avcodec_decode_video2(), avcodec_decode_audio4() or\n * avcodec_decode_subtitle2() if the caller wishes to decode the data.\n *\n * AVPacket.pts, AVPacket.dts and AVPacket.duration timing information will be\n * set if known. They may also be unset (i.e. AV_NOPTS_VALUE for\n * pts/dts, 0 for duration) if the stream does not provide them. The timing\n * information will be in AVStream.time_base units, i.e. it has to be\n * multiplied by the timebase to convert them to seconds.\n *\n * If AVPacket.buf is set on the returned packet, then the packet is\n * allocated dynamically and the user may keep it indefinitely.\n * Otherwise, if AVPacket.buf is NULL, the packet data is backed by a\n * static storage somewhere inside the demuxer and the packet is only valid\n * until the next av_read_frame() call or closing the file. If the caller\n * requires a longer lifetime, av_dup_packet() will make an av_malloc()ed copy\n * of it.\n * In both cases, the packet must be freed with av_free_packet() when it is no\n * longer needed.\n *\n * @section lavf_decoding_seek Seeking\n * @}\n *\n * @defgroup lavf_encoding Muxing\n * @{\n * Muxers take encoded data in the form of @ref AVPacket \"AVPackets\" and write\n * it into files or other output bytestreams in the specified container format.\n *\n * The main API functions for muxing are avformat_write_header() for writing the\n * file header, av_write_frame() / av_interleaved_write_frame() for writing the\n * packets and av_write_trailer() for finalizing the file.\n *\n * At the beginning of the muxing process, the caller must first call\n * avformat_alloc_context() to create a muxing context. 
The caller then sets up\n * the muxer by filling the various fields in this context:\n *\n * - The @ref AVFormatContext.oformat \"oformat\" field must be set to select the\n *   muxer that will be used.\n * - Unless the format is of the AVFMT_NOFILE type, the @ref AVFormatContext.pb\n *   \"pb\" field must be set to an opened IO context, either returned from\n *   avio_open2() or a custom one.\n * - Unless the format is of the AVFMT_NOSTREAMS type, at least one stream must\n *   be created with the avformat_new_stream() function. The caller should fill\n *   the @ref AVStream.codec \"stream codec context\" information, such as the\n *   codec @ref AVCodecContext.codec_type \"type\", @ref AVCodecContext.codec_id\n *   \"id\" and other parameters (e.g. width / height, the pixel or sample format,\n *   etc.) as known. The @ref AVStream.time_base \"stream timebase\" should\n *   be set to the timebase that the caller desires to use for this stream (note\n *   that the timebase actually used by the muxer can be different, as will be\n *   described later).\n * - It is advised to manually initialize only the relevant fields in\n *   AVCodecContext, rather than using @ref avcodec_copy_context() during\n *   remuxing: there is no guarantee that the codec context values remain valid\n *   for both input and output format contexts.\n * - The caller may fill in additional information, such as @ref\n *   AVFormatContext.metadata \"global\" or @ref AVStream.metadata \"per-stream\"\n *   metadata, @ref AVFormatContext.chapters \"chapters\", @ref\n *   AVFormatContext.programs \"programs\", etc. as described in the\n *   AVFormatContext documentation. Whether such information will actually be\n *   stored in the output depends on what the container format and the muxer\n *   support.\n *\n * When the muxing context is fully set up, the caller must call\n * avformat_write_header() to initialize the muxer internals and write the file\n * header. 
Whether anything actually is written to the IO context at this step\n * depends on the muxer, but this function must always be called. Any muxer\n * private options must be passed in the options parameter to this function.\n *\n * The data is then sent to the muxer by repeatedly calling av_write_frame() or\n * av_interleaved_write_frame() (consult those functions' documentation for\n * discussion on the difference between them; only one of them may be used with\n * a single muxing context, they should not be mixed). Do note that the timing\n * information on the packets sent to the muxer must be in the corresponding\n * AVStream's timebase. That timebase is set by the muxer (in the\n * avformat_write_header() step) and may be different from the timebase\n * requested by the caller.\n *\n * Once all the data has been written, the caller must call av_write_trailer()\n * to flush any buffered packets and finalize the output file, then close the IO\n * context (if any) and finally free the muxing context with\n * avformat_free_context().\n * @}\n *\n * @defgroup lavf_io I/O Read/Write\n * @{\n * @}\n *\n * @defgroup lavf_codec Demuxers\n * @{\n * @defgroup lavf_codec_native Native Demuxers\n * @{\n * @}\n * @defgroup lavf_codec_wrappers External library wrappers\n * @{\n * @}\n * @}\n * @defgroup lavf_protos I/O Protocols\n * @{\n * @}\n * @defgroup lavf_internal Internal\n * @{\n * @}\n * @}\n *\n */\n\n#include <time.h>\n#include <stdio.h>  /* FILE */\n#include \"libavcodec/avcodec.h\"\n#include \"libavutil/dict.h\"\n#include \"libavutil/log.h\"\n\n#include \"avio.h\"\n#include \"libavformat/version.h\"\n\nstruct AVFormatContext;\n\nstruct AVDeviceInfoList;\nstruct AVDeviceCapabilitiesQuery;\n\n/**\n * @defgroup metadata_api Public Metadata API\n * @{\n * @ingroup libavf\n * The metadata API allows libavformat to export metadata tags to a client\n * application when demuxing. 
Conversely it allows a client application to\n * set metadata when muxing.\n *\n * Metadata is exported or set as pairs of key/value strings in the 'metadata'\n * fields of the AVFormatContext, AVStream, AVChapter and AVProgram structs\n * using the @ref lavu_dict \"AVDictionary\" API. Like all strings in FFmpeg,\n * metadata is assumed to be UTF-8 encoded Unicode. Note that metadata\n * exported by demuxers isn't checked to be valid UTF-8 in most cases.\n *\n * Important concepts to keep in mind:\n * -  Keys are unique; there can never be 2 tags with the same key. This is\n *    also meant semantically, i.e., a demuxer should not knowingly produce\n *    several keys that are literally different but semantically identical.\n *    E.g., key=Author5, key=Author6. In this example, all authors must be\n *    placed in the same tag.\n * -  Metadata is flat, not hierarchical; there are no subtags. If you\n *    want to store, e.g., the email address of the child of producer Alice\n *    and actor Bob, that could have key=alice_and_bobs_childs_email_address.\n * -  Several modifiers can be applied to the tag name. This is done by\n *    appending a dash character ('-') and the modifier name in the order\n *    they appear in the list below -- e.g. foo-eng-sort, not foo-sort-eng.\n *    -  language -- a tag whose value is localized for a particular language\n *       is appended with the ISO 639-2/B 3-letter language code.\n *       For example: Author-ger=Michael, Author-eng=Mike\n *       The original/default language is in the unqualified \"Author\" tag.\n *       A demuxer should set a default if it sets any translated tag.\n *    -  sorting  -- a modified version of a tag that should be used for\n *       sorting will have '-sort' appended. E.g. artist=\"The Beatles\",\n *       artist-sort=\"Beatles, The\".\n * - Some protocols and demuxers support metadata updates. 
After a successful\n *   call to av_read_packet(), AVFormatContext.event_flags or AVStream.event_flags\n *   will be updated to indicate if metadata changed. In order to detect metadata\n *   changes on a stream, you need to loop through all streams in the AVFormatContext\n *   and check their individual event_flags.\n *\n * -  Demuxers attempt to export metadata in a generic format, however tags\n *    with no generic equivalents are left as they are stored in the container.\n *    Follows a list of generic tag names:\n *\n @verbatim\n album        -- name of the set this work belongs to\n album_artist -- main creator of the set/album, if different from artist.\n                 e.g. \"Various Artists\" for compilation albums.\n artist       -- main creator of the work\n comment      -- any additional description of the file.\n composer     -- who composed the work, if different from artist.\n copyright    -- name of copyright holder.\n creation_time-- date when the file was created, preferably in ISO 8601.\n date         -- date when the work was created, preferably in ISO 8601.\n disc         -- number of a subset, e.g. disc in a multi-disc collection.\n encoder      -- name/settings of the software/hardware that produced the file.\n encoded_by   -- person/group who created the file.\n filename     -- original name of the file.\n genre        -- <self-evident>.\n language     -- main language in which the work is performed, preferably\n                 in ISO 639-2 format. 
Multiple languages can be specified by\n                 separating them with commas.\n performer    -- artist who performed the work, if different from artist.\n                 E.g for \"Also sprach Zarathustra\", artist would be \"Richard\n                 Strauss\" and performer \"London Philharmonic Orchestra\".\n publisher    -- name of the label/publisher.\n service_name     -- name of the service in broadcasting (channel name).\n service_provider -- name of the service provider in broadcasting.\n title        -- name of the work.\n track        -- number of this work in the set, can be in form current/total.\n variant_bitrate -- the total bitrate of the bitrate variant that the current stream is part of\n @endverbatim\n *\n * Look in the examples section for an application example how to use the Metadata API.\n *\n * @}\n */\n\n/* packet functions */\n\n\n/**\n * Allocate and read the payload of a packet and initialize its\n * fields with default values.\n *\n * @param s    associated IO context\n * @param pkt packet\n * @param size desired payload size\n * @return >0 (read size) if OK, AVERROR_xxx otherwise\n */\nint av_get_packet(AVIOContext *s, AVPacket *pkt, int size);\n\n\n/**\n * Read data and append it to the current content of the AVPacket.\n * If pkt->size is 0 this is identical to av_get_packet.\n * Note that this uses av_grow_packet and thus involves a realloc\n * which is inefficient. 
Thus this function should only be used\n * when there is no reasonable way to know (an upper bound of)\n * the final size.\n *\n * @param s    associated IO context\n * @param pkt packet\n * @param size amount of data to read\n * @return >0 (read size) if OK, AVERROR_xxx otherwise, previous data\n *         will not be lost even if an error occurs.\n */\nint av_append_packet(AVIOContext *s, AVPacket *pkt, int size);\n\n#if FF_API_LAVF_FRAC\n/*************************************************/\n/* fractional numbers for exact pts handling */\n\n/**\n * The exact value of the fractional number is: 'val + num / den'.\n * num is assumed to be 0 <= num < den.\n */\ntypedef struct AVFrac {\n    int64_t val, num, den;\n} AVFrac;\n#endif\n\n/*************************************************/\n/* input/output formats */\n\nstruct AVCodecTag;\n\n/**\n * This structure contains the data a format has to probe a file.\n */\ntypedef struct AVProbeData {\n    const char *filename;\n    unsigned char *buf; /**< Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero. */\n    int buf_size;       /**< Size of buf except extra allocated bytes */\n    const char *mime_type; /**< mime_type, when known. */\n} AVProbeData;\n\n#define AVPROBE_SCORE_RETRY (AVPROBE_SCORE_MAX/4)\n#define AVPROBE_SCORE_STREAM_RETRY (AVPROBE_SCORE_MAX/4-1)\n\n#define AVPROBE_SCORE_EXTENSION  50 ///< score for file extension\n#define AVPROBE_SCORE_MIME       75 ///< score for file mime type\n#define AVPROBE_SCORE_MAX       100 ///< maximum score\n\n#define AVPROBE_PADDING_SIZE 32             ///< extra allocated bytes at the end of the probe buffer\n\n/// Demuxer will use avio_open, no opened file should be provided by the caller.\n#define AVFMT_NOFILE        0x0001\n#define AVFMT_NEEDNUMBER    0x0002 /**< Needs '%d' in filename. */\n#define AVFMT_SHOW_IDS      0x0008 /**< Show format stream IDs numbers. 
*/\n#define AVFMT_RAWPICTURE    0x0020 /**< Format wants AVPicture structure for\n                                      raw picture data. */\n#define AVFMT_GLOBALHEADER  0x0040 /**< Format wants global header. */\n#define AVFMT_NOTIMESTAMPS  0x0080 /**< Format does not need / have any timestamps. */\n#define AVFMT_GENERIC_INDEX 0x0100 /**< Use generic index building code. */\n#define AVFMT_TS_DISCONT    0x0200 /**< Format allows timestamp discontinuities. Note, muxers always require valid (monotone) timestamps */\n#define AVFMT_VARIABLE_FPS  0x0400 /**< Format allows variable fps. */\n#define AVFMT_NODIMENSIONS  0x0800 /**< Format does not need width/height */\n#define AVFMT_NOSTREAMS     0x1000 /**< Format does not require any streams */\n#define AVFMT_NOBINSEARCH   0x2000 /**< Format does not allow to fall back on binary search via read_timestamp */\n#define AVFMT_NOGENSEARCH   0x4000 /**< Format does not allow to fall back on generic search */\n#define AVFMT_NO_BYTE_SEEK  0x8000 /**< Format does not allow seeking by bytes */\n#define AVFMT_ALLOW_FLUSH  0x10000 /**< Format allows flushing. If not set, the muxer will not receive a NULL packet in the write_packet function. */\n#if LIBAVFORMAT_VERSION_MAJOR <= 54\n#define AVFMT_TS_NONSTRICT 0x8020000 //we try to be compatible to the ABIs of ffmpeg and major forks\n#else\n#define AVFMT_TS_NONSTRICT 0x20000\n#endif\n                                   /**< Format does not require strictly\n                                        increasing timestamps, but they must\n                                        still be monotonic */\n#define AVFMT_TS_NEGATIVE  0x40000 /**< Format allows muxing negative\n                                        timestamps. 
If not set the timestamp\n                                        will be shifted in av_write_frame and\n                                        av_interleaved_write_frame so they\n                                        start from 0.\n                                        The user or muxer can override this through\n                                        AVFormatContext.avoid_negative_ts\n                                        */\n\n#define AVFMT_SEEK_TO_PTS   0x4000000 /**< Seeking is based on PTS */\n\n/**\n * @addtogroup lavf_encoding\n * @{\n */\ntypedef struct AVOutputFormat {\n    const char *name;\n    /**\n     * Descriptive name for the format, meant to be more human-readable\n     * than name. You should use the NULL_IF_CONFIG_SMALL() macro\n     * to define it.\n     */\n    const char *long_name;\n    const char *mime_type;\n    const char *extensions; /**< comma-separated filename extensions */\n    /* output support */\n    enum AVCodecID audio_codec;    /**< default audio codec */\n    enum AVCodecID video_codec;    /**< default video codec */\n    enum AVCodecID subtitle_codec; /**< default subtitle codec */\n    /**\n     * can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_RAWPICTURE,\n     * AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS,\n     * AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH,\n     * AVFMT_TS_NONSTRICT\n     */\n    int flags;\n\n    /**\n     * List of supported codec_id-codec_tag pairs, ordered by \"better\n     * choice first\". The arrays are all terminated by AV_CODEC_ID_NONE.\n     */\n    const struct AVCodecTag * const *codec_tag;\n\n\n    const AVClass *priv_class; ///< AVClass for the private context\n\n    /*****************************************************************\n     * No fields below this line are part of the public API. 
They\n     * may not be used outside of libavformat and can be changed and\n     * removed at will.\n     * New public fields should be added right above.\n     *****************************************************************\n     */\n    struct AVOutputFormat *next;\n    /**\n     * size of private data so that it can be allocated in the wrapper\n     */\n    int priv_data_size;\n\n    int (*write_header)(struct AVFormatContext *);\n    /**\n     * Write a packet. If AVFMT_ALLOW_FLUSH is set in flags,\n     * pkt can be NULL in order to flush data buffered in the muxer.\n     * When flushing, return 0 if there still is more data to flush,\n     * or 1 if everything was flushed and there is no more buffered\n     * data.\n     */\n    int (*write_packet)(struct AVFormatContext *, AVPacket *pkt);\n    int (*write_trailer)(struct AVFormatContext *);\n    /**\n     * Currently only used to set pixel format if not YUV420P.\n     */\n    int (*interleave_packet)(struct AVFormatContext *, AVPacket *out,\n                             AVPacket *in, int flush);\n    /**\n     * Test if the given codec can be stored in this container.\n     *\n     * @return 1 if the codec is supported, 0 if it is not.\n     *         A negative number if unknown.\n     *         MKTAG('A', 'P', 'I', 'C') if the codec is only supported as AV_DISPOSITION_ATTACHED_PIC\n     */\n    int (*query_codec)(enum AVCodecID id, int std_compliance);\n\n    void (*get_output_timestamp)(struct AVFormatContext *s, int stream,\n                                 int64_t *dts, int64_t *wall);\n    /**\n     * Allows sending messages from application to device.\n     */\n    int (*control_message)(struct AVFormatContext *s, int type,\n                           void *data, size_t data_size);\n\n    /**\n     * Write an uncoded AVFrame.\n     *\n     * See av_write_uncoded_frame() for details.\n     *\n     * The library will free *frame afterwards, but the muxer can prevent it\n     * by setting the pointer 
to NULL.\n     */\n    int (*write_uncoded_frame)(struct AVFormatContext *, int stream_index,\n                               AVFrame **frame, unsigned flags);\n    /**\n     * Returns device list with it properties.\n     * @see avdevice_list_devices() for more details.\n     */\n    int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list);\n    /**\n     * Initialize device capabilities submodule.\n     * @see avdevice_capabilities_create() for more details.\n     */\n    int (*create_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps);\n    /**\n     * Free device capabilities submodule.\n     * @see avdevice_capabilities_free() for more details.\n     */\n    int (*free_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps);\n    enum AVCodecID data_codec; /**< default data codec */\n} AVOutputFormat;\n/**\n * @}\n */\n\n/**\n * @addtogroup lavf_decoding\n * @{\n */\ntypedef struct AVInputFormat {\n    /**\n     * A comma separated list of short names for the format. New names\n     * may be appended with a minor bump.\n     */\n    const char *name;\n\n    /**\n     * Descriptive name for the format, meant to be more human-readable\n     * than name. You should use the NULL_IF_CONFIG_SMALL() macro\n     * to define it.\n     */\n    const char *long_name;\n\n    /**\n     * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS,\n     * AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH,\n     * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.\n     */\n    int flags;\n\n    /**\n     * If extensions are defined, then no probe is done. 
You should\n     * usually not use extension format guessing because it is not\n     * reliable enough\n     */\n    const char *extensions;\n\n    const struct AVCodecTag * const *codec_tag;\n\n    const AVClass *priv_class; ///< AVClass for the private context\n\n    /**\n     * Comma-separated list of mime types.\n     * It is used check for matching mime types while probing.\n     * @see av_probe_input_format2\n     */\n    const char *mime_type;\n\n    /*****************************************************************\n     * No fields below this line are part of the public API. They\n     * may not be used outside of libavformat and can be changed and\n     * removed at will.\n     * New public fields should be added right above.\n     *****************************************************************\n     */\n    struct AVInputFormat *next;\n\n    /**\n     * Raw demuxers store their codec ID here.\n     */\n    int raw_codec_id;\n\n    /**\n     * Size of private data so that it can be allocated in the wrapper.\n     */\n    int priv_data_size;\n\n    /**\n     * Tell if a given file has a chance of being parsed as this format.\n     * The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes\n     * big so you do not have to check for that unless you need more.\n     */\n    int (*read_probe)(AVProbeData *);\n\n    /**\n     * Read the format header and initialize the AVFormatContext\n     * structure. Return 0 if OK. 'avformat_new_stream' should be\n     * called to create new streams.\n     */\n    int (*read_header)(struct AVFormatContext *);\n\n    /**\n     * Read one packet and put it in 'pkt'. pts and flags are also\n     * set. 
'avformat_new_stream' can be called only if the flag\n     * AVFMTCTX_NOHEADER is used and only in the calling thread (not in a\n     * background thread).\n     * @return 0 on success, < 0 on error.\n     *         When returning an error, pkt must not have been allocated\n     *         or must be freed before returning\n     */\n    int (*read_packet)(struct AVFormatContext *, AVPacket *pkt);\n\n    /**\n     * Close the stream. The AVFormatContext and AVStreams are not\n     * freed by this function\n     */\n    int (*read_close)(struct AVFormatContext *);\n\n    /**\n     * Seek to a given timestamp relative to the frames in\n     * stream component stream_index.\n     * @param stream_index Must not be -1.\n     * @param flags Selects which direction should be preferred if no exact\n     *              match is available.\n     * @return >= 0 on success (but not necessarily the new offset)\n     */\n    int (*read_seek)(struct AVFormatContext *,\n                     int stream_index, int64_t timestamp, int flags);\n\n    /**\n     * Get the next timestamp in stream[stream_index].time_base units.\n     * @return the timestamp or AV_NOPTS_VALUE if an error occurred\n     */\n    int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index,\n                              int64_t *pos, int64_t pos_limit);\n\n    /**\n     * Start/resume playing - only meaningful if using a network-based format\n     * (RTSP).\n     */\n    int (*read_play)(struct AVFormatContext *);\n\n    /**\n     * Pause playing - only meaningful if using a network-based format\n     * (RTSP).\n     */\n    int (*read_pause)(struct AVFormatContext *);\n\n    /**\n     * Seek to timestamp ts.\n     * Seeking will be done so that the point from which all active streams\n     * can be presented successfully will be closest to ts and within min/max_ts.\n     * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL.\n     */\n    int (*read_seek2)(struct AVFormatContext 
*s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags);\n\n    /**\n     * Returns device list with it properties.\n     * @see avdevice_list_devices() for more details.\n     */\n    int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list);\n\n    /**\n     * Initialize device capabilities submodule.\n     * @see avdevice_capabilities_create() for more details.\n     */\n    int (*create_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps);\n\n    /**\n     * Free device capabilities submodule.\n     * @see avdevice_capabilities_free() for more details.\n     */\n    int (*free_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps);\n} AVInputFormat;\n/**\n * @}\n */\n\nenum AVStreamParseType {\n    AVSTREAM_PARSE_NONE,\n    AVSTREAM_PARSE_FULL,       /**< full parsing and repack */\n    AVSTREAM_PARSE_HEADERS,    /**< Only parse headers, do not repack. */\n    AVSTREAM_PARSE_TIMESTAMPS, /**< full parsing and interpolation of timestamps for frames not starting on a packet boundary */\n    AVSTREAM_PARSE_FULL_ONCE,  /**< full parsing and repack of the first frame only, only implemented for H.264 currently */\n    AVSTREAM_PARSE_FULL_RAW=MKTAG(0,'R','A','W'),       /**< full parsing and repack with timestamp and position generation by parser for raw\n                                                             this assumes that each packet in the file contains no demuxer level headers and\n                                                             just codec level data, otherwise position generation would fail */\n};\n\ntypedef struct AVIndexEntry {\n    int64_t pos;\n    int64_t timestamp;        /**<\n                               * Timestamp in AVStream.time_base units, preferably the time from which on correctly decoded frames are available\n                               * when seeking to this entry. 
That means preferable PTS on keyframe based formats.\n                               * But demuxers can choose to store a different timestamp, if it is more convenient for the implementation or nothing better\n                               * is known\n                               */\n#define AVINDEX_KEYFRAME 0x0001\n    int flags:2;\n    int size:30; //Yeah, trying to keep the size of this small to reduce memory requirements (it is 24 vs. 32 bytes due to possible 8-byte alignment).\n    int min_distance;         /**< Minimum distance between this and the previous keyframe, used to avoid unneeded searching. */\n} AVIndexEntry;\n\n#define AV_DISPOSITION_DEFAULT   0x0001\n#define AV_DISPOSITION_DUB       0x0002\n#define AV_DISPOSITION_ORIGINAL  0x0004\n#define AV_DISPOSITION_COMMENT   0x0008\n#define AV_DISPOSITION_LYRICS    0x0010\n#define AV_DISPOSITION_KARAOKE   0x0020\n\n/**\n * Track should be used during playback by default.\n * Useful for subtitle track that should be displayed\n * even when user did not explicitly ask for subtitles.\n */\n#define AV_DISPOSITION_FORCED    0x0040\n#define AV_DISPOSITION_HEARING_IMPAIRED  0x0080  /**< stream for hearing impaired audiences */\n#define AV_DISPOSITION_VISUAL_IMPAIRED   0x0100  /**< stream for visual impaired audiences */\n#define AV_DISPOSITION_CLEAN_EFFECTS     0x0200  /**< stream without voice */\n/**\n * The stream is stored in the file as an attached picture/\"cover art\" (e.g.\n * APIC frame in ID3v2). 
The single packet associated with it will be returned\n * among the first few packets read from the file unless seeking takes place.\n * It can also be accessed at any time in AVStream.attached_pic.\n */\n#define AV_DISPOSITION_ATTACHED_PIC      0x0400\n\n/**\n * To specify text track kind (different from subtitles default).\n */\n#define AV_DISPOSITION_CAPTIONS     0x10000\n#define AV_DISPOSITION_DESCRIPTIONS 0x20000\n#define AV_DISPOSITION_METADATA     0x40000\n\n/**\n * Options for behavior on timestamp wrap detection.\n */\n#define AV_PTS_WRAP_IGNORE      0   ///< ignore the wrap\n#define AV_PTS_WRAP_ADD_OFFSET  1   ///< add the format specific offset on wrap detection\n#define AV_PTS_WRAP_SUB_OFFSET  -1  ///< subtract the format specific offset on wrap detection\n\n/**\n * Stream structure.\n * New fields can be added to the end with minor version bumps.\n * Removal, reordering and changes to existing fields require a major\n * version bump.\n * sizeof(AVStream) must not be used outside libav*.\n */\ntypedef struct AVStream {\n    int index;    /**< stream index in AVFormatContext */\n    /**\n     * Format-specific stream ID.\n     * decoding: set by libavformat\n     * encoding: set by the user, replaced by libavformat if left unset\n     */\n    int id;\n    /**\n     * Codec context associated with this stream. Allocated and freed by\n     * libavformat.\n     *\n     * - decoding: The demuxer exports codec information stored in the headers\n     *             here.\n     * - encoding: The user sets codec information, the muxer writes it to the\n     *             output. 
Mandatory fields as specified in AVCodecContext\n     *             documentation must be set even if this AVCodecContext is\n     *             not actually used for encoding.\n     */\n    AVCodecContext *codec;\n    void *priv_data;\n\n#if FF_API_LAVF_FRAC\n    /**\n     * @deprecated this field is unused\n     */\n    attribute_deprecated\n    struct AVFrac pts;\n#endif\n\n    /**\n     * This is the fundamental unit of time (in seconds) in terms\n     * of which frame timestamps are represented.\n     *\n     * decoding: set by libavformat\n     * encoding: May be set by the caller before avformat_write_header() to\n     *           provide a hint to the muxer about the desired timebase. In\n     *           avformat_write_header(), the muxer will overwrite this field\n     *           with the timebase that will actually be used for the timestamps\n     *           written into the file (which may or may not be related to the\n     *           user-provided one, depending on the format).\n     */\n    AVRational time_base;\n\n    /**\n     * Decoding: pts of the first frame of the stream in presentation order, in stream time base.\n     * Only set this if you are absolutely 100% sure that the value you set\n     * it to really is the pts of the first frame.\n     * This may be undefined (AV_NOPTS_VALUE).\n     * @note The ASF header does NOT contain a correct start_time the ASF\n     * demuxer must NOT set this.\n     */\n    int64_t start_time;\n\n    /**\n     * Decoding: duration of the stream, in stream time base.\n     * If a source file does not specify a duration, but does specify\n     * a bitrate, this value will be estimated from bitrate and file size.\n     */\n    int64_t duration;\n\n    int64_t nb_frames;                 ///< number of frames in this stream if known or 0\n\n    int disposition; /**< AV_DISPOSITION_* bit field */\n\n    enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed.\n\n  
  /**\n     * sample aspect ratio (0 if unknown)\n     * - encoding: Set by user.\n     * - decoding: Set by libavformat.\n     */\n    AVRational sample_aspect_ratio;\n\n    AVDictionary *metadata;\n\n    /**\n     * Average framerate\n     *\n     * - demuxing: May be set by libavformat when creating the stream or in\n     *             avformat_find_stream_info().\n     * - muxing: May be set by the caller before avformat_write_header().\n     */\n    AVRational avg_frame_rate;\n\n    /**\n     * For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet\n     * will contain the attached picture.\n     *\n     * decoding: set by libavformat, must not be modified by the caller.\n     * encoding: unused\n     */\n    AVPacket attached_pic;\n\n    /**\n     * An array of side data that applies to the whole stream (i.e. the\n     * container does not allow it to change between packets).\n     *\n     * There may be no overlap between the side data in this array and side data\n     * in the packets. I.e. a given side data is either exported by the muxer\n     * (demuxing) / set by the caller (muxing) in this array, then it never\n     * appears in the packets, or the side data is exported / sent through\n     * the packets (always in the first packet where the value becomes known or\n     * changes), then it does not appear in this array.\n     *\n     * - demuxing: Set by libavformat when the stream is created.\n     * - muxing: May be set by the caller before avformat_write_header().\n     *\n     * Freed by libavformat in avformat_free_context().\n     *\n     * @see av_format_inject_global_side_data()\n     */\n    AVPacketSideData *side_data;\n    /**\n     * The number of elements in the AVStream.side_data array.\n     */\n    int            nb_side_data;\n\n    /**\n     * Flags for the user to detect events happening on the stream. 
Flags must\n     * be cleared by the user once the event has been handled.\n     * A combination of AVSTREAM_EVENT_FLAG_*.\n     */\n    int event_flags;\n#define AVSTREAM_EVENT_FLAG_METADATA_UPDATED 0x0001 ///< The call resulted in updated metadata.\n\n    /*****************************************************************\n     * All fields below this line are not part of the public API. They\n     * may not be used outside of libavformat and can be changed and\n     * removed at will.\n     * New public fields should be added right above.\n     *****************************************************************\n     */\n\n    /**\n     * Stream information used internally by av_find_stream_info()\n     */\n#define MAX_STD_TIMEBASES (30*12+7+6)\n    struct {\n        int64_t last_dts;\n        int64_t duration_gcd;\n        int duration_count;\n        int64_t rfps_duration_sum;\n        double (*duration_error)[2][MAX_STD_TIMEBASES];\n        int64_t codec_info_duration;\n        int64_t codec_info_duration_fields;\n\n        /**\n         * 0  -> decoder has not been searched for yet.\n         * >0 -> decoder found\n         * <0 -> decoder with codec_id == -found_decoder has not been found\n         */\n        int found_decoder;\n\n        int64_t last_duration;\n\n        /**\n         * Those are used for average framerate estimation.\n         */\n        int64_t fps_first_dts;\n        int     fps_first_dts_idx;\n        int64_t fps_last_dts;\n        int     fps_last_dts_idx;\n\n    } *info;\n\n    int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */\n\n    // Timestamp generation support:\n    /**\n     * Timestamp corresponding to the last dts sync point.\n     *\n     * Initialized when AVCodecParserContext.dts_sync_point >= 0 and\n     * a DTS is received from the underlying container. 
Otherwise set to\n     * AV_NOPTS_VALUE by default.\n     */\n    int64_t first_dts;\n    int64_t cur_dts;\n    int64_t last_IP_pts;\n    int last_IP_duration;\n\n    /**\n     * Number of packets to buffer for codec probing\n     */\n#define MAX_PROBE_PACKETS 2500\n    int probe_packets;\n\n    /**\n     * Number of frames that have been demuxed during av_find_stream_info()\n     */\n    int codec_info_nb_frames;\n\n    /* av_read_frame() support */\n    enum AVStreamParseType need_parsing;\n    struct AVCodecParserContext *parser;\n\n    /**\n     * last packet in packet_buffer for this stream when muxing.\n     */\n    struct AVPacketList *last_in_packet_buffer;\n    AVProbeData probe_data;\n#define MAX_REORDER_DELAY 16\n    int64_t pts_buffer[MAX_REORDER_DELAY+1];\n\n    AVIndexEntry *index_entries; /**< Only used if the format does not\n                                    support seeking natively. */\n    int nb_index_entries;\n    unsigned int index_entries_allocated_size;\n\n    /**\n     * Real base framerate of the stream.\n     * This is the lowest framerate with which all timestamps can be\n     * represented accurately (it is the least common multiple of all\n     * framerates in the stream). 
Note, this value is just a guess!\n     * For example, if the time base is 1/90000 and all frames have either\n     * approximately 3600 or 1800 timer ticks, then r_frame_rate will be 50/1.\n     *\n     * Code outside avformat should access this field using:\n     * av_stream_get/set_r_frame_rate(stream)\n     */\n    AVRational r_frame_rate;\n\n    /**\n     * Stream Identifier\n     * This is the MPEG-TS stream identifier +1\n     * 0 means unknown\n     */\n    int stream_identifier;\n\n    int64_t interleaver_chunk_size;\n    int64_t interleaver_chunk_duration;\n\n    /**\n     * stream probing state\n     * -1   -> probing finished\n     *  0   -> no probing requested\n     * rest -> perform probing with request_probe being the minimum score to accept.\n     * NOT PART OF PUBLIC API\n     */\n    int request_probe;\n    /**\n     * Indicates that everything up to the next keyframe\n     * should be discarded.\n     */\n    int skip_to_keyframe;\n\n    /**\n     * Number of samples to skip at the start of the frame decoded from the next packet.\n     */\n    int skip_samples;\n\n    /**\n     * If not 0, the first audio sample that should be discarded from the stream.\n     * This is broken by design (needs global sample count), but can't be\n     * avoided for broken by design formats such as mp3 with ad-hoc gapless\n     * audio support.\n     */\n    int64_t first_discard_sample;\n\n    /**\n     * The sample after last sample that is intended to be discarded after\n     * first_discard_sample. Works on frame boundaries only. 
Used to prevent\n     * early EOF if the gapless info is broken (considered concatenated mp3s).\n     */\n    int64_t last_discard_sample;\n\n    /**\n     * Number of internally decoded frames, used internally in libavformat, do not access\n     * its lifetime differs from info which is why it is not in that structure.\n     */\n    int nb_decoded_frames;\n\n    /**\n     * Timestamp offset added to timestamps before muxing\n     * NOT PART OF PUBLIC API\n     */\n    int64_t mux_ts_offset;\n\n    /**\n     * Internal data to check for wrapping of the time stamp\n     */\n    int64_t pts_wrap_reference;\n\n    /**\n     * Options for behavior, when a wrap is detected.\n     *\n     * Defined by AV_PTS_WRAP_ values.\n     *\n     * If correction is enabled, there are two possibilities:\n     * If the first time stamp is near the wrap point, the wrap offset\n     * will be subtracted, which will create negative time stamps.\n     * Otherwise the offset will be added.\n     */\n    int pts_wrap_behavior;\n\n    /**\n     * Internal data to prevent doing update_initial_durations() twice\n     */\n    int update_initial_durations_done;\n\n    /**\n     * Internal data to generate dts from pts\n     */\n    int64_t pts_reorder_error[MAX_REORDER_DELAY+1];\n    uint8_t pts_reorder_error_count[MAX_REORDER_DELAY+1];\n\n    /**\n     * Internal data to analyze DTS and detect faulty mpeg streams\n     */\n    int64_t last_dts_for_order_check;\n    uint8_t dts_ordered;\n    uint8_t dts_misordered;\n\n    /**\n     * Internal data to inject global side data\n     */\n    int inject_global_side_data;\n\n    /**\n     * String containing pairs of keys and values describing recommended encoder configuration.\n     * Pairs are separated by ','.\n     * Keys are separated from values by '='.\n     */\n    char *recommended_encoder_configuration;\n\n    /**\n     * display aspect ratio (0 if unknown)\n     * - encoding: unused\n     * - decoding: Set by libavformat to calculate 
sample_aspect_ratio internally\n     */\n    AVRational display_aspect_ratio;\n} AVStream;\n\nAVRational av_stream_get_r_frame_rate(const AVStream *s);\nvoid       av_stream_set_r_frame_rate(AVStream *s, AVRational r);\nstruct AVCodecParserContext *av_stream_get_parser(const AVStream *s);\nchar* av_stream_get_recommended_encoder_configuration(const AVStream *s);\nvoid  av_stream_set_recommended_encoder_configuration(AVStream *s, char *configuration);\n\n/**\n * Returns the pts of the last muxed packet + its duration\n *\n * the returned value is undefined when used with a demuxer.\n */\nint64_t    av_stream_get_end_pts(const AVStream *st);\n\n#define AV_PROGRAM_RUNNING 1\n\n/**\n * New fields can be added to the end with minor version bumps.\n * Removal, reordering and changes to existing fields require a major\n * version bump.\n * sizeof(AVProgram) must not be used outside libav*.\n */\ntypedef struct AVProgram {\n    int            id;\n    int            flags;\n    enum AVDiscard discard;        ///< selects which program to discard and which to feed to the caller\n    unsigned int   *stream_index;\n    unsigned int   nb_stream_indexes;\n    AVDictionary *metadata;\n\n    int program_num;\n    int pmt_pid;\n    int pcr_pid;\n\n    /*****************************************************************\n     * All fields below this line are not part of the public API. 
They\n     * may not be used outside of libavformat and can be changed and\n     * removed at will.\n     * New public fields should be added right above.\n     *****************************************************************\n     */\n    int64_t start_time;\n    int64_t end_time;\n\n    int64_t pts_wrap_reference;    ///< reference dts for wrap detection\n    int pts_wrap_behavior;         ///< behavior on wrap detection\n} AVProgram;\n\n#define AVFMTCTX_NOHEADER      0x0001 /**< signal that no header is present\n                                         (streams are added dynamically) */\n\ntypedef struct AVChapter {\n    int id;                 ///< unique ID to identify the chapter\n    AVRational time_base;   ///< time base in which the start/end timestamps are specified\n    int64_t start, end;     ///< chapter start/end time in time_base units\n    AVDictionary *metadata;\n} AVChapter;\n\n\n/**\n * Callback used by devices to communicate with application.\n */\ntypedef int (*av_format_control_message)(struct AVFormatContext *s, int type,\n                                         void *data, size_t data_size);\n\n\n/**\n * The duration of a video can be estimated through various ways, and this enum can be used\n * to know how the duration was estimated.\n */\nenum AVDurationEstimationMethod {\n    AVFMT_DURATION_FROM_PTS,    ///< Duration accurately estimated from PTSes\n    AVFMT_DURATION_FROM_STREAM, ///< Duration estimated from a stream with a known duration\n    AVFMT_DURATION_FROM_BITRATE ///< Duration estimated from bitrate (less accurate)\n};\n\ntypedef struct AVFormatInternal AVFormatInternal;\n\n/**\n * Format I/O context.\n * New fields can be added to the end with minor version bumps.\n * Removal, reordering and changes to existing fields require a major\n * version bump.\n * sizeof(AVFormatContext) must not be used outside libav*, use\n * avformat_alloc_context() to create an AVFormatContext.\n */\ntypedef struct AVFormatContext {\n    /**\n     
* A class for logging and @ref avoptions. Set by avformat_alloc_context().\n     * Exports (de)muxer private options if they exist.\n     */\n    const AVClass *av_class;\n\n    /**\n     * The input container format.\n     *\n     * Demuxing only, set by avformat_open_input().\n     */\n    struct AVInputFormat *iformat;\n\n    /**\n     * The output container format.\n     *\n     * Muxing only, must be set by the caller before avformat_write_header().\n     */\n    struct AVOutputFormat *oformat;\n\n    /**\n     * Format private data. This is an AVOptions-enabled struct\n     * if and only if iformat/oformat.priv_class is not NULL.\n     *\n     * - muxing: set by avformat_write_header()\n     * - demuxing: set by avformat_open_input()\n     */\n    void *priv_data;\n\n    /**\n     * I/O context.\n     *\n     * - demuxing: either set by the user before avformat_open_input() (then\n     *             the user must close it manually) or set by avformat_open_input().\n     * - muxing: set by the user before avformat_write_header(). The caller must\n     *           take care of closing / freeing the IO context.\n     *\n     * Do NOT set this field if AVFMT_NOFILE flag is set in\n     * iformat/oformat.flags. In such a case, the (de)muxer will handle\n     * I/O in some other way and this field will be NULL.\n     */\n    AVIOContext *pb;\n\n    /* stream info */\n    /**\n     * Flags signalling stream properties. A combination of AVFMTCTX_*.\n     * Set by libavformat.\n     */\n    int ctx_flags;\n\n    /**\n     * Number of elements in AVFormatContext.streams.\n     *\n     * Set by avformat_new_stream(), must not be modified by any other code.\n     */\n    unsigned int nb_streams;\n    /**\n     * A list of all streams in the file. 
New streams are created with\n     * avformat_new_stream().\n     *\n     * - demuxing: streams are created by libavformat in avformat_open_input().\n     *             If AVFMTCTX_NOHEADER is set in ctx_flags, then new streams may also\n     *             appear in av_read_frame().\n     * - muxing: streams are created by the user before avformat_write_header().\n     *\n     * Freed by libavformat in avformat_free_context().\n     */\n    AVStream **streams;\n\n    /**\n     * input or output filename\n     *\n     * - demuxing: set by avformat_open_input()\n     * - muxing: may be set by the caller before avformat_write_header()\n     */\n    char filename[1024];\n\n    /**\n     * Position of the first frame of the component, in\n     * AV_TIME_BASE fractional seconds. NEVER set this value directly:\n     * It is deduced from the AVStream values.\n     *\n     * Demuxing only, set by libavformat.\n     */\n    int64_t start_time;\n\n    /**\n     * Duration of the stream, in AV_TIME_BASE fractional\n     * seconds. Only set this value if you know none of the individual stream\n     * durations and also do not set any of them. This is deduced from the\n     * AVStream values if not set.\n     *\n     * Demuxing only, set by libavformat.\n     */\n    int64_t duration;\n\n    /**\n     * Total stream bitrate in bit/s, 0 if not\n     * available. Never set it directly if the file_size and the\n     * duration are known as FFmpeg can compute it automatically.\n     */\n    int bit_rate;\n\n    unsigned int packet_size;\n    int max_delay;\n\n    /**\n     * Flags modifying the (de)muxer behaviour. 
A combination of AVFMT_FLAG_*.\n     * Set by the user before avformat_open_input() / avformat_write_header().\n     */\n    int flags;\n#define AVFMT_FLAG_GENPTS       0x0001 ///< Generate missing pts even if it requires parsing future frames.\n#define AVFMT_FLAG_IGNIDX       0x0002 ///< Ignore index.\n#define AVFMT_FLAG_NONBLOCK     0x0004 ///< Do not block when reading packets from input.\n#define AVFMT_FLAG_IGNDTS       0x0008 ///< Ignore DTS on frames that contain both DTS & PTS\n#define AVFMT_FLAG_NOFILLIN     0x0010 ///< Do not infer any values from other values, just return what is stored in the container\n#define AVFMT_FLAG_NOPARSE      0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. Also seeking to frames can not work if parsing to find frame boundaries has been disabled\n#define AVFMT_FLAG_NOBUFFER     0x0040 ///< Do not buffer frames when possible\n#define AVFMT_FLAG_CUSTOM_IO    0x0080 ///< The caller has supplied a custom AVIOContext, don't avio_close() it.\n#define AVFMT_FLAG_DISCARD_CORRUPT  0x0100 ///< Discard frames marked corrupted\n#define AVFMT_FLAG_FLUSH_PACKETS    0x0200 ///< Flush the AVIOContext every packet.\n/**\n * When muxing, try to avoid writing any random/volatile data to the output.\n * This includes any random IDs, real-time timestamps/dates, muxer version, etc.\n *\n * This flag is mainly intended for testing.\n */\n#define AVFMT_FLAG_BITEXACT         0x0400\n#define AVFMT_FLAG_MP4A_LATM    0x8000 ///< Enable RTP MP4A-LATM payload\n#define AVFMT_FLAG_SORT_DTS    0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down)\n#define AVFMT_FLAG_PRIV_OPT    0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted)\n#define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Don't merge side data but keep it separate.\n\n    /**\n     * @deprecated deprecated in 
favor of probesize2\n     */\n    unsigned int probesize;\n\n    /**\n     * @deprecated deprecated in favor of max_analyze_duration2\n     */\n    attribute_deprecated\n    int max_analyze_duration;\n\n    const uint8_t *key;\n    int keylen;\n\n    unsigned int nb_programs;\n    AVProgram **programs;\n\n    /**\n     * Forced video codec_id.\n     * Demuxing: Set by user.\n     */\n    enum AVCodecID video_codec_id;\n\n    /**\n     * Forced audio codec_id.\n     * Demuxing: Set by user.\n     */\n    enum AVCodecID audio_codec_id;\n\n    /**\n     * Forced subtitle codec_id.\n     * Demuxing: Set by user.\n     */\n    enum AVCodecID subtitle_codec_id;\n\n    /**\n     * Maximum amount of memory in bytes to use for the index of each stream.\n     * If the index exceeds this size, entries will be discarded as\n     * needed to maintain a smaller size. This can lead to slower or less\n     * accurate seeking (depends on demuxer).\n     * Demuxers for which a full in-memory index is mandatory will ignore\n     * this.\n     * - muxing: unused\n     * - demuxing: set by user\n     */\n    unsigned int max_index_size;\n\n    /**\n     * Maximum amount of memory in bytes to use for buffering frames\n     * obtained from realtime capture devices.\n     */\n    unsigned int max_picture_buffer;\n\n    /**\n     * Number of chapters in AVChapter array.\n     * When muxing, chapters are normally written in the file header,\n     * so nb_chapters should normally be initialized before write_header\n     * is called. Some muxers (e.g. mov and mkv) can also write chapters\n     * in the trailer.  
To write chapters in the trailer, nb_chapters\n     * must be zero when write_header is called and non-zero when\n     * write_trailer is called.\n     * - muxing: set by user\n     * - demuxing: set by libavformat\n     */\n    unsigned int nb_chapters;\n    AVChapter **chapters;\n\n    /**\n     * Metadata that applies to the whole file.\n     *\n     * - demuxing: set by libavformat in avformat_open_input()\n     * - muxing: may be set by the caller before avformat_write_header()\n     *\n     * Freed by libavformat in avformat_free_context().\n     */\n    AVDictionary *metadata;\n\n    /**\n     * Start time of the stream in real world time, in microseconds\n     * since the Unix epoch (00:00 1st January 1970). That is, pts=0 in the\n     * stream was captured at this real world time.\n     * - muxing: Set by the caller before avformat_write_header(). If set to\n     *           either 0 or AV_NOPTS_VALUE, then the current wall-time will\n     *           be used.\n     * - demuxing: Set by libavformat. AV_NOPTS_VALUE if unknown. Note that\n     *             the value may become known after some number of frames\n     *             have been received.\n     */\n    int64_t start_time_realtime;\n\n    /**\n     * The number of frames used for determining the framerate in\n     * avformat_find_stream_info().\n     * Demuxing only, set by the caller before avformat_find_stream_info().\n     */\n    int fps_probe_size;\n\n    /**\n     * Error recognition; higher values will detect more errors but may\n     * misdetect some more or less valid parts as errors.\n     * Demuxing only, set by the caller before avformat_open_input().\n     */\n    int error_recognition;\n\n    /**\n     * Custom interrupt callbacks for the I/O layer.\n     *\n     * demuxing: set by the user before avformat_open_input().\n     * muxing: set by the user before avformat_write_header()\n     * (mainly useful for AVFMT_NOFILE formats). 
The callback\n     * should also be passed to avio_open2() if it's used to\n     * open the file.\n     */\n    AVIOInterruptCB interrupt_callback;\n\n    /**\n     * Flags to enable debugging.\n     */\n    int debug;\n#define FF_FDEBUG_TS        0x0001\n\n    /**\n     * Maximum buffering duration for interleaving.\n     *\n     * To ensure all the streams are interleaved correctly,\n     * av_interleaved_write_frame() will wait until it has at least one packet\n     * for each stream before actually writing any packets to the output file.\n     * When some streams are \"sparse\" (i.e. there are large gaps between\n     * successive packets), this can result in excessive buffering.\n     *\n     * This field specifies the maximum difference between the timestamps of the\n     * first and the last packet in the muxing queue, above which libavformat\n     * will output a packet regardless of whether it has queued a packet for all\n     * the streams.\n     *\n     * Muxing only, set by the caller before avformat_write_header().\n     */\n    int64_t max_interleave_delta;\n\n    /**\n     * Allow non-standard and experimental extension\n     * @see AVCodecContext.strict_std_compliance\n     */\n    int strict_std_compliance;\n\n    /**\n     * Flags for the user to detect events happening on the file. Flags must\n     * be cleared by the user once the event has been handled.\n     * A combination of AVFMT_EVENT_FLAG_*.\n     */\n    int event_flags;\n#define AVFMT_EVENT_FLAG_METADATA_UPDATED 0x0001 ///< The call resulted in updated metadata.\n\n    /**\n     * Maximum number of packets to read while waiting for the first timestamp.\n     * Decoding only.\n     */\n    int max_ts_probe;\n\n    /**\n     * Avoid negative timestamps during muxing.\n     * Any value of the AVFMT_AVOID_NEG_TS_* constants.\n     * Note, this only works when using av_interleaved_write_frame. 
(interleave_packet_per_dts is in use)\n     * - muxing: Set by user\n     * - demuxing: unused\n     */\n    int avoid_negative_ts;\n#define AVFMT_AVOID_NEG_TS_AUTO             -1 ///< Enabled when required by target format\n#define AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE 1 ///< Shift timestamps so they are non negative\n#define AVFMT_AVOID_NEG_TS_MAKE_ZERO         2 ///< Shift timestamps so that they start at 0\n\n    /**\n     * Transport stream id.\n     * This will be moved into demuxer private options. Thus no API/ABI compatibility\n     */\n    int ts_id;\n\n    /**\n     * Audio preload in microseconds.\n     * Note, not all formats support this and unpredictable things may happen if it is used when not supported.\n     * - encoding: Set by user via AVOptions (NO direct access)\n     * - decoding: unused\n     */\n    int audio_preload;\n\n    /**\n     * Max chunk time in microseconds.\n     * Note, not all formats support this and unpredictable things may happen if it is used when not supported.\n     * - encoding: Set by user via AVOptions (NO direct access)\n     * - decoding: unused\n     */\n    int max_chunk_duration;\n\n    /**\n     * Max chunk size in bytes\n     * Note, not all formats support this and unpredictable things may happen if it is used when not supported.\n     * - encoding: Set by user via AVOptions (NO direct access)\n     * - decoding: unused\n     */\n    int max_chunk_size;\n\n    /**\n     * forces the use of wallclock timestamps as pts/dts of packets\n     * This has undefined results in the presence of B frames.\n     * - encoding: unused\n     * - decoding: Set by user via AVOptions (NO direct access)\n     */\n    int use_wallclock_as_timestamps;\n\n    /**\n     * avio flags, used to force AVIO_FLAG_DIRECT.\n     * - encoding: unused\n     * - decoding: Set by user via AVOptions (NO direct access)\n     */\n    int avio_flags;\n\n    /**\n     * The duration field can be estimated through various ways, and this field can be 
used\n     * to know how the duration was estimated.\n     * - encoding: unused\n     * - decoding: Read by user via AVOptions (NO direct access)\n     */\n    enum AVDurationEstimationMethod duration_estimation_method;\n\n    /**\n     * Skip initial bytes when opening stream\n     * - encoding: unused\n     * - decoding: Set by user via AVOptions (NO direct access)\n     */\n    int64_t skip_initial_bytes;\n\n    /**\n     * Correct single timestamp overflows\n     * - encoding: unused\n     * - decoding: Set by user via AVOptions (NO direct access)\n     */\n    unsigned int correct_ts_overflow;\n\n    /**\n     * Force seeking to any (also non key) frames.\n     * - encoding: unused\n     * - decoding: Set by user via AVOptions (NO direct access)\n     */\n    int seek2any;\n\n    /**\n     * Flush the I/O context after each packet.\n     * - encoding: Set by user via AVOptions (NO direct access)\n     * - decoding: unused\n     */\n    int flush_packets;\n\n    /**\n     * format probing score.\n     * The maximal score is AVPROBE_SCORE_MAX, it is set when the demuxer probes\n     * the format.\n     * - encoding: unused\n     * - decoding: set by avformat, read by user via av_format_get_probe_score() (NO direct access)\n     */\n    int probe_score;\n\n    /**\n     * number of bytes to read maximally to identify format.\n     * - encoding: unused\n     * - decoding: set by user through AVOptions (NO direct access)\n     */\n    int format_probesize;\n\n    /**\n     * ',' separated list of allowed decoders.\n     * If NULL then all are allowed\n     * - encoding: unused\n     * - decoding: set by user through AVOptions (NO direct access)\n     */\n    char *codec_whitelist;\n\n    /**\n     * ',' separated list of allowed demuxers.\n     * If NULL then all are allowed\n     * - encoding: unused\n     * - decoding: set by user through AVOptions (NO direct access)\n     */\n    char *format_whitelist;\n\n    /**\n     * An opaque field for libavformat internal 
usage.\n     * Must not be accessed in any way by callers.\n     */\n    AVFormatInternal *internal;\n\n    /**\n     * IO repositioned flag.\n     * This is set by avformat when the underlying IO context read pointer\n     * is repositioned, for example when doing byte based seeking.\n     * Demuxers can use the flag to detect such changes.\n     */\n    int io_repositioned;\n\n    /**\n     * Forced video codec.\n     * This allows forcing a specific decoder, even when there are multiple with\n     * the same codec_id.\n     * Demuxing: Set by user via av_format_set_video_codec (NO direct access).\n     */\n    AVCodec *video_codec;\n\n    /**\n     * Forced audio codec.\n     * This allows forcing a specific decoder, even when there are multiple with\n     * the same codec_id.\n     * Demuxing: Set by user via av_format_set_audio_codec (NO direct access).\n     */\n    AVCodec *audio_codec;\n\n    /**\n     * Forced subtitle codec.\n     * This allows forcing a specific decoder, even when there are multiple with\n     * the same codec_id.\n     * Demuxing: Set by user via av_format_set_subtitle_codec (NO direct access).\n     */\n    AVCodec *subtitle_codec;\n\n    /**\n     * Forced data codec.\n     * This allows forcing a specific decoder, even when there are multiple with\n     * the same codec_id.\n     * Demuxing: Set by user via av_format_set_data_codec (NO direct access).\n     */\n    AVCodec *data_codec;\n\n    /**\n     * Number of bytes to be written as padding in a metadata header.\n     * Demuxing: Unused.\n     * Muxing: Set by user via av_format_set_metadata_header_padding.\n     */\n    int metadata_header_padding;\n\n    /**\n     * User data.\n     * This is a place for some private data of the user.\n     * Mostly usable with control_message_cb or any future callbacks in device's context.\n     */\n    void *opaque;\n\n    /**\n     * Callback used by devices to communicate with application.\n     */\n    av_format_control_message 
control_message_cb;\n\n    /**\n     * Output timestamp offset, in microseconds.\n     * Muxing: set by user via AVOptions (NO direct access)\n     */\n    int64_t output_ts_offset;\n\n    /**\n     * Maximum duration (in AV_TIME_BASE units) of the data read\n     * from input in avformat_find_stream_info().\n     * Demuxing only, set by the caller before avformat_find_stream_info()\n     * via AVOptions (NO direct access).\n     * Can be set to 0 to let avformat choose using a heuristic.\n     */\n    int64_t max_analyze_duration2;\n\n    /**\n     * Maximum size of the data read from input for determining\n     * the input container format.\n     * Demuxing only, set by the caller before avformat_open_input()\n     * via AVOptions (NO direct access).\n     */\n    int64_t probesize2;\n\n    /**\n     * dump format separator.\n     * can be \", \" or \"\\n      \" or anything else\n     * Code outside libavformat should access this field using AVOptions\n     * (NO direct access).\n     * - muxing: Set by user.\n     * - demuxing: Set by user.\n     */\n    uint8_t *dump_separator;\n\n    /**\n     * Forced Data codec_id.\n     * Demuxing: Set by user.\n     */\n    enum AVCodecID data_codec_id;\n} AVFormatContext;\n\nint av_format_get_probe_score(const AVFormatContext *s);\nAVCodec * av_format_get_video_codec(const AVFormatContext *s);\nvoid      av_format_set_video_codec(AVFormatContext *s, AVCodec *c);\nAVCodec * av_format_get_audio_codec(const AVFormatContext *s);\nvoid      av_format_set_audio_codec(AVFormatContext *s, AVCodec *c);\nAVCodec * av_format_get_subtitle_codec(const AVFormatContext *s);\nvoid      av_format_set_subtitle_codec(AVFormatContext *s, AVCodec *c);\nAVCodec * av_format_get_data_codec(const AVFormatContext *s);\nvoid      av_format_set_data_codec(AVFormatContext *s, AVCodec *c);\nint       av_format_get_metadata_header_padding(const AVFormatContext *s);\nvoid      av_format_set_metadata_header_padding(AVFormatContext *s, int c);\nvoid *    
av_format_get_opaque(const AVFormatContext *s);\nvoid      av_format_set_opaque(AVFormatContext *s, void *opaque);\nav_format_control_message av_format_get_control_message_cb(const AVFormatContext *s);\nvoid      av_format_set_control_message_cb(AVFormatContext *s, av_format_control_message callback);\n\n/**\n * This function will cause global side data to be injected in the next packet\n * of each stream as well as after any subsequent seek.\n */\nvoid av_format_inject_global_side_data(AVFormatContext *s);\n\n/**\n * Returns the method used to set ctx->duration.\n *\n * @return AVFMT_DURATION_FROM_PTS, AVFMT_DURATION_FROM_STREAM, or AVFMT_DURATION_FROM_BITRATE.\n */\nenum AVDurationEstimationMethod av_fmt_ctx_get_duration_estimation_method(const AVFormatContext* ctx);\n\ntypedef struct AVPacketList {\n    AVPacket pkt;\n    struct AVPacketList *next;\n} AVPacketList;\n\n\n/**\n * @defgroup lavf_core Core functions\n * @ingroup libavf\n *\n * Functions for querying libavformat capabilities, allocating core structures,\n * etc.\n * @{\n */\n\n/**\n * Return the LIBAVFORMAT_VERSION_INT constant.\n */\nunsigned avformat_version(void);\n\n/**\n * Return the libavformat build-time configuration.\n */\nconst char *avformat_configuration(void);\n\n/**\n * Return the libavformat license.\n */\nconst char *avformat_license(void);\n\n/**\n * Initialize libavformat and register all the muxers, demuxers and\n * protocols. If you do not call this function, then you can select\n * exactly which formats you want to support.\n *\n * @see av_register_input_format()\n * @see av_register_output_format()\n */\nvoid av_register_all(void);\n\nvoid av_register_input_format(AVInputFormat *format);\nvoid av_register_output_format(AVOutputFormat *format);\n\n/**\n * Do global initialization of network components. 
This is optional,\n * but recommended, since it avoids the overhead of implicitly\n * doing the setup for each session.\n *\n * Calling this function will become mandatory if using network\n * protocols at some major version bump.\n */\nint avformat_network_init(void);\n\n/**\n * Undo the initialization done by avformat_network_init.\n */\nint avformat_network_deinit(void);\n\n/**\n * If f is NULL, returns the first registered input format,\n * if f is non-NULL, returns the next registered input format after f\n * or NULL if f is the last one.\n */\nAVInputFormat  *av_iformat_next(const AVInputFormat  *f);\n\n/**\n * If f is NULL, returns the first registered output format,\n * if f is non-NULL, returns the next registered output format after f\n * or NULL if f is the last one.\n */\nAVOutputFormat *av_oformat_next(const AVOutputFormat *f);\n\n/**\n * Allocate an AVFormatContext.\n * avformat_free_context() can be used to free the context and everything\n * allocated by the framework within it.\n */\nAVFormatContext *avformat_alloc_context(void);\n\n/**\n * Free an AVFormatContext and all its streams.\n * @param s context to free\n */\nvoid avformat_free_context(AVFormatContext *s);\n\n/**\n * Get the AVClass for AVFormatContext. It can be used in combination with\n * AV_OPT_SEARCH_FAKE_OBJ for examining options.\n *\n * @see av_opt_find().\n */\nconst AVClass *avformat_get_class(void);\n\n/**\n * Add a new stream to a media file.\n *\n * When demuxing, it is called by the demuxer in read_header(). 
If the\n * flag AVFMTCTX_NOHEADER is set in s.ctx_flags, then it may also\n * be called in read_packet().\n *\n * When muxing, should be called by the user before avformat_write_header().\n *\n * User is required to call avcodec_close() and avformat_free_context() to\n * clean up the allocation by avformat_new_stream().\n *\n * @param s media file handle\n * @param c If non-NULL, the AVCodecContext corresponding to the new stream\n * will be initialized to use this codec. This is needed for e.g. codec-specific\n * defaults to be set, so codec should be provided if it is known.\n *\n * @return newly created stream or NULL on error.\n */\nAVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c);\n\n/**\n * Get side information from stream.\n *\n * @param stream stream\n * @param type desired side information type\n * @param size pointer for side information size to store (optional)\n * @return pointer to data if present or NULL otherwise\n */\nuint8_t *av_stream_get_side_data(AVStream *stream,\n                                 enum AVPacketSideDataType type, int *size);\n\nAVProgram *av_new_program(AVFormatContext *s, int id);\n\n/**\n * @}\n */\n\n\n/**\n * Allocate an AVFormatContext for an output format.\n * avformat_free_context() can be used to free the context and\n * everything allocated by the framework within it.\n *\n * @param *ctx is set to the created format context, or to NULL in\n * case of failure\n * @param oformat format to use for allocating the context, if NULL\n * format_name and filename are used instead\n * @param format_name the name of output format to use for allocating the\n * context, if NULL filename is used instead\n * @param filename the name of the filename to use for allocating the\n * context, may be NULL\n * @return >= 0 in case of success, a negative AVERROR code in case of\n * failure\n */\nint avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat,\n                                   const char 
*format_name, const char *filename);\n\n/**\n * @addtogroup lavf_decoding\n * @{\n */\n\n/**\n * Find AVInputFormat based on the short name of the input format.\n */\nAVInputFormat *av_find_input_format(const char *short_name);\n\n/**\n * Guess the file format.\n *\n * @param pd        data to be probed\n * @param is_opened Whether the file is already opened; determines whether\n *                  demuxers with or without AVFMT_NOFILE are probed.\n */\nAVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened);\n\n/**\n * Guess the file format.\n *\n * @param pd        data to be probed\n * @param is_opened Whether the file is already opened; determines whether\n *                  demuxers with or without AVFMT_NOFILE are probed.\n * @param score_max A probe score larger than this is required to accept a\n *                  detection, the variable is set to the actual detection\n *                  score afterwards.\n *                  If the score is <= AVPROBE_SCORE_MAX / 4 it is recommended\n *                  to retry with a larger probe buffer.\n */\nAVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max);\n\n/**\n * Guess the file format.\n *\n * @param is_opened Whether the file is already opened; determines whether\n *                  demuxers with or without AVFMT_NOFILE are probed.\n * @param score_ret The score of the best detection.\n */\nAVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret);\n\n/**\n * Probe a bytestream to determine the input format. Each time a probe returns\n * with a score that is too low, the probe buffer size is increased and another\n * attempt is made. 
When the maximum probe size is reached, the input format\n * with the highest score is returned.\n *\n * @param pb the bytestream to probe\n * @param fmt the input format is put here\n * @param filename the filename of the stream\n * @param logctx the log context\n * @param offset the offset within the bytestream to probe from\n * @param max_probe_size the maximum probe buffer size (zero for default)\n * @return the score in case of success, a negative value corresponding to an\n *         AVERROR code otherwise; the maximal score is AVPROBE_SCORE_MAX\n */\nint av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,\n                           const char *filename, void *logctx,\n                           unsigned int offset, unsigned int max_probe_size);\n\n/**\n * Like av_probe_input_buffer2() but returns 0 on success\n */\nint av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,\n                          const char *filename, void *logctx,\n                          unsigned int offset, unsigned int max_probe_size);\n\n/**\n * Open an input stream and read the header. The codecs are not opened.\n * The stream must be closed with avformat_close_input().\n *\n * @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context).\n *           May be a pointer to NULL, in which case an AVFormatContext is allocated by this\n *           function and written into ps.\n *           Note that a user-supplied AVFormatContext will be freed on failure.\n * @param filename Name of the stream to open.\n * @param fmt If non-NULL, this parameter forces a specific input format.\n *            Otherwise the format is autodetected.\n * @param options  A dictionary filled with AVFormatContext and demuxer-private options.\n *                 On return this parameter will be destroyed and replaced with a dict containing\n *                 options that were not found. 
May be NULL.\n *\n * @return 0 on success, a negative AVERROR on failure.\n *\n * @note If you want to use custom IO, preallocate the format context and set its pb field.\n */\nint avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options);\n\nattribute_deprecated\nint av_demuxer_open(AVFormatContext *ic);\n\n/**\n * Read packets of a media file to get stream information. This\n * is useful for file formats with no headers such as MPEG. This\n * function also computes the real framerate in case of MPEG-2 repeat\n * frame mode.\n * The logical file position is not changed by this function;\n * examined packets may be buffered for later processing.\n *\n * @param ic media file handle\n * @param options  If non-NULL, an ic.nb_streams long array of pointers to\n *                 dictionaries, where i-th member contains options for\n *                 codec corresponding to i-th stream.\n *                 On return each dictionary will be filled with options that were not found.\n * @return >=0 if OK, AVERROR_xxx on error\n *\n * @note this function isn't guaranteed to open all the codecs, so\n *       options being non-empty at return is a perfectly normal behavior.\n *\n * @todo Let the user decide somehow what information is needed so that\n *       we do not waste time getting stuff the user does not need.\n */\nint avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options);\n\n/**\n * Find the programs which belong to a given stream.\n *\n * @param ic    media file handle\n * @param last  the last found program, the search will start after this\n *              program, or from the beginning if it is NULL\n * @param s     stream index\n * @return the next program which belongs to s, NULL if no program is found or\n *         the last program is not among the programs of ic.\n */\nAVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s);\n\n/**\n * Find the \"best\" stream in 
the file.\n * The best stream is determined according to various heuristics as the most\n * likely to be what the user expects.\n * If the decoder parameter is non-NULL, av_find_best_stream will find the\n * default decoder for the stream's codec; streams for which no decoder can\n * be found are ignored.\n *\n * @param ic                media file handle\n * @param type              stream type: video, audio, subtitles, etc.\n * @param wanted_stream_nb  user-requested stream number,\n *                          or -1 for automatic selection\n * @param related_stream    try to find a stream related (eg. in the same\n *                          program) to this one, or -1 if none\n * @param decoder_ret       if non-NULL, returns the decoder for the\n *                          selected stream\n * @param flags             flags; none are currently defined\n * @return  the non-negative stream number in case of success,\n *          AVERROR_STREAM_NOT_FOUND if no stream with the requested type\n *          could be found,\n *          AVERROR_DECODER_NOT_FOUND if streams were found but no decoder\n * @note  If av_find_best_stream returns successfully and decoder_ret is not\n *        NULL, then *decoder_ret is guaranteed to be set to a valid AVCodec.\n */\nint av_find_best_stream(AVFormatContext *ic,\n                        enum AVMediaType type,\n                        int wanted_stream_nb,\n                        int related_stream,\n                        AVCodec **decoder_ret,\n                        int flags);\n\n/**\n * Return the next frame of a stream.\n * This function returns what is stored in the file, and does not validate\n * that what is there are valid frames for the decoder. It will split what is\n * stored in the file into frames and return one for each call. 
It will not\n * omit invalid data between valid frames so as to give the decoder the maximum\n * information possible for decoding.\n *\n * If pkt->buf is NULL, then the packet is valid until the next\n * av_read_frame() or until avformat_close_input(). Otherwise the packet\n * is valid indefinitely. In both cases the packet must be freed with\n * av_free_packet when it is no longer needed. For video, the packet contains\n * exactly one frame. For audio, it contains an integer number of frames if each\n * frame has a known fixed size (e.g. PCM or ADPCM data). If the audio frames\n * have a variable size (e.g. MPEG audio), then it contains one frame.\n *\n * pkt->pts, pkt->dts and pkt->duration are always set to correct\n * values in AVStream.time_base units (and guessed if the format cannot\n * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format\n * has B-frames, so it is better to rely on pkt->dts if you do not\n * decompress the payload.\n *\n * @return 0 if OK, < 0 on error or end of file\n */\nint av_read_frame(AVFormatContext *s, AVPacket *pkt);\n\n/**\n * Seek to the keyframe at timestamp.\n * 'timestamp' in 'stream_index'.\n *\n * @param s media file handle\n * @param stream_index If stream_index is (-1), a default\n * stream is selected, and timestamp is automatically converted\n * from AV_TIME_BASE units to the stream specific time_base.\n * @param timestamp Timestamp in AVStream.time_base units\n *        or, if no stream is specified, in AV_TIME_BASE units.\n * @param flags flags which select direction and seeking mode\n * @return >= 0 on success\n */\nint av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp,\n                  int flags);\n\n/**\n * Seek to timestamp ts.\n * Seeking will be done so that the point from which all active streams\n * can be presented successfully will be closest to ts and within min/max_ts.\n * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL.\n *\n * If flags contain 
AVSEEK_FLAG_BYTE, then all timestamps are in bytes and\n * are the file position (this may not be supported by all demuxers).\n * If flags contain AVSEEK_FLAG_FRAME, then all timestamps are in frames\n * in the stream with stream_index (this may not be supported by all demuxers).\n * Otherwise all timestamps are in units of the stream selected by stream_index\n * or if stream_index is -1, in AV_TIME_BASE units.\n * If flags contain AVSEEK_FLAG_ANY, then non-keyframes are treated as\n * keyframes (this may not be supported by all demuxers).\n * If flags contain AVSEEK_FLAG_BACKWARD, it is ignored.\n *\n * @param s media file handle\n * @param stream_index index of the stream which is used as time base reference\n * @param min_ts smallest acceptable timestamp\n * @param ts target timestamp\n * @param max_ts largest acceptable timestamp\n * @param flags flags\n * @return >=0 on success, error code otherwise\n *\n * @note This is part of the new seek API which is still under construction.\n *       Thus do not use this yet. It may change at any time, do not expect\n *       ABI compatibility yet!\n */\nint avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags);\n\n/**\n * Discard all internally buffered data. This can be useful when dealing with\n * discontinuities in the byte stream. Generally works only with formats that\n * can resync. This includes headerless formats like MPEG-TS/TS but should also\n * work with NUT, Ogg and in a limited way AVI for example.\n *\n * The set of streams, the detected duration, stream parameters and codecs do\n * not change when calling this function. If you want a complete reset, it's\n * better to open a new AVFormatContext.\n *\n * This does not flush the AVIOContext (s->pb). 
If necessary, call\n * avio_flush(s->pb) before calling this function.\n *\n * @param s media file handle\n * @return >=0 on success, error code otherwise\n */\nint avformat_flush(AVFormatContext *s);\n\n/**\n * Start playing a network-based stream (e.g. RTSP stream) at the\n * current position.\n */\nint av_read_play(AVFormatContext *s);\n\n/**\n * Pause a network-based stream (e.g. RTSP stream).\n *\n * Use av_read_play() to resume it.\n */\nint av_read_pause(AVFormatContext *s);\n\n/**\n * Close an opened input AVFormatContext. Free it and all its contents\n * and set *s to NULL.\n */\nvoid avformat_close_input(AVFormatContext **s);\n/**\n * @}\n */\n\n#define AVSEEK_FLAG_BACKWARD 1 ///< seek backward\n#define AVSEEK_FLAG_BYTE     2 ///< seeking based on position in bytes\n#define AVSEEK_FLAG_ANY      4 ///< seek to any frame, even non-keyframes\n#define AVSEEK_FLAG_FRAME    8 ///< seeking based on frame number\n\n/**\n * @addtogroup lavf_encoding\n * @{\n */\n/**\n * Allocate the stream private data and write the stream header to\n * an output media file.\n *\n * @param s Media file handle, must be allocated with avformat_alloc_context().\n *          Its oformat field must be set to the desired output format;\n *          Its pb field must be set to an already opened AVIOContext.\n * @param options  An AVDictionary filled with AVFormatContext and muxer-private options.\n *                 On return this parameter will be destroyed and replaced with a dict containing\n *                 options that were not found. May be NULL.\n *\n * @return 0 on success, negative AVERROR on failure.\n *\n * @see av_opt_find, av_dict_set, avio_open, av_oformat_next.\n */\nint avformat_write_header(AVFormatContext *s, AVDictionary **options);\n\n/**\n * Write a packet to an output media file.\n *\n * This function passes the packet directly to the muxer, without any buffering\n * or reordering. 
The caller is responsible for correctly interleaving the\n * packets if the format requires it. Callers that want libavformat to handle\n * the interleaving should call av_interleaved_write_frame() instead of this\n * function.\n *\n * @param s media file handle\n * @param pkt The packet containing the data to be written. Note that unlike\n *            av_interleaved_write_frame(), this function does not take\n *            ownership of the packet passed to it (though some muxers may make\n *            an internal reference to the input packet).\n *            <br>\n *            This parameter can be NULL (at any time, not just at the end), in\n *            order to immediately flush data buffered within the muxer, for\n *            muxers that buffer up data internally before writing it to the\n *            output.\n *            <br>\n *            Packet's @ref AVPacket.stream_index \"stream_index\" field must be\n *            set to the index of the corresponding stream in @ref\n *            AVFormatContext.streams \"s->streams\". It is very strongly\n *            recommended that timing information (@ref AVPacket.pts \"pts\", @ref\n *            AVPacket.dts \"dts\", @ref AVPacket.duration \"duration\") is set to\n *            correct values.\n * @return < 0 on error, = 0 if OK, 1 if flushed and there is no more data to flush\n *\n * @see av_interleaved_write_frame()\n */\nint av_write_frame(AVFormatContext *s, AVPacket *pkt);\n\n/**\n * Write a packet to an output media file ensuring correct interleaving.\n *\n * This function will buffer the packets internally as needed to make sure the\n * packets in the output file are properly interleaved in the order of\n * increasing dts. 
Callers doing their own interleaving should call\n * av_write_frame() instead of this function.\n *\n * @param s media file handle\n * @param pkt The packet containing the data to be written.\n *            <br>\n *            If the packet is reference-counted, this function will take\n *            ownership of this reference and unreference it later when it sees\n *            fit.\n *            The caller must not access the data through this reference after\n *            this function returns. If the packet is not reference-counted,\n *            libavformat will make a copy.\n *            <br>\n *            This parameter can be NULL (at any time, not just at the end), to\n *            flush the interleaving queues.\n *            <br>\n *            Packet's @ref AVPacket.stream_index \"stream_index\" field must be\n *            set to the index of the corresponding stream in @ref\n *            AVFormatContext.streams \"s->streams\". It is very strongly\n *            recommended that timing information (@ref AVPacket.pts \"pts\", @ref\n *            AVPacket.dts \"dts\", @ref AVPacket.duration \"duration\") is set to\n *            correct values.\n *\n * @return 0 on success, a negative AVERROR on error. 
Libavformat will always\n *         take care of freeing the packet, even if this function fails.\n *\n * @see av_write_frame(), AVFormatContext.max_interleave_delta\n */\nint av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt);\n\n/**\n * Write an uncoded frame to an output media file.\n *\n * The frame must be correctly interleaved according to the container\n * specification; if not, then av_interleaved_write_frame() must be used.\n *\n * See av_interleaved_write_frame() for details.\n */\nint av_write_uncoded_frame(AVFormatContext *s, int stream_index,\n                           AVFrame *frame);\n\n/**\n * Write an uncoded frame to an output media file.\n *\n * If the muxer supports it, this function allows writing an AVFrame\n * structure directly, without encoding it into a packet.\n * It is mostly useful for devices and similar special muxers that use raw\n * video or PCM data and will not serialize it into a byte stream.\n *\n * To test whether it is possible to use it with a given muxer and stream,\n * use av_write_uncoded_frame_query().\n *\n * The caller gives up ownership of the frame and must not access it\n * afterwards.\n *\n * @return  >=0 for success, a negative code on error\n */\nint av_interleaved_write_uncoded_frame(AVFormatContext *s, int stream_index,\n                                       AVFrame *frame);\n\n/**\n * Test whether a muxer supports uncoded frame.\n *\n * @return  >=0 if an uncoded frame can be written to that muxer and stream,\n *          <0 if not\n */\nint av_write_uncoded_frame_query(AVFormatContext *s, int stream_index);\n\n/**\n * Write the stream trailer to an output media file and free the\n * file private data.\n *\n * May only be called after a successful call to avformat_write_header.\n *\n * @param s media file handle\n * @return 0 if OK, AVERROR_xxx on error\n */\nint av_write_trailer(AVFormatContext *s);\n\n/**\n * Return the output format in the list of registered output formats\n * which best matches 
the provided parameters, or return NULL if\n * there is no match.\n *\n * @param short_name if non-NULL checks if short_name matches with the\n * names of the registered formats\n * @param filename if non-NULL checks if filename terminates with the\n * extensions of the registered formats\n * @param mime_type if non-NULL checks if mime_type matches with the\n * MIME type of the registered formats\n */\nAVOutputFormat *av_guess_format(const char *short_name,\n                                const char *filename,\n                                const char *mime_type);\n\n/**\n * Guess the codec ID based upon muxer and filename.\n */\nenum AVCodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,\n                            const char *filename, const char *mime_type,\n                            enum AVMediaType type);\n\n/**\n * Get timing information for the data currently output.\n * The exact meaning of \"currently output\" depends on the format.\n * It is mostly relevant for devices that have an internal buffer and/or\n * work in real time.\n * @param s          media file handle\n * @param stream     stream in the media file\n * @param[out] dts   DTS of the last packet output for the stream, in stream\n *                   time_base units\n * @param[out] wall  absolute time when that packet was output,\n *                   in microseconds\n * @return  0 if OK, AVERROR(ENOSYS) if the format does not support it\n * Note: some formats or devices may not allow measuring dts and wall\n * atomically.\n */\nint av_get_output_timestamp(struct AVFormatContext *s, int stream,\n                            int64_t *dts, int64_t *wall);\n\n\n/**\n * @}\n */\n\n\n/**\n * @defgroup lavf_misc Utility functions\n * @ingroup libavf\n * @{\n *\n * Miscellaneous utility functions related to both muxing and demuxing\n * (or neither).\n */\n\n/**\n * Send a nice hexadecimal dump of a buffer to the specified file stream.\n *\n * @param f The file stream pointer where 
the dump should be sent to.\n * @param buf buffer\n * @param size buffer size\n *\n * @see av_hex_dump_log, av_pkt_dump2, av_pkt_dump_log2\n */\nvoid av_hex_dump(FILE *f, const uint8_t *buf, int size);\n\n/**\n * Send a nice hexadecimal dump of a buffer to the log.\n *\n * @param avcl A pointer to an arbitrary struct of which the first field is a\n * pointer to an AVClass struct.\n * @param level The importance level of the message, lower values signifying\n * higher importance.\n * @param buf buffer\n * @param size buffer size\n *\n * @see av_hex_dump, av_pkt_dump2, av_pkt_dump_log2\n */\nvoid av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size);\n\n/**\n * Send a nice dump of a packet to the specified file stream.\n *\n * @param f The file stream pointer where the dump should be sent to.\n * @param pkt packet to dump\n * @param dump_payload True if the payload must be displayed, too.\n * @param st AVStream that the packet belongs to\n */\nvoid av_pkt_dump2(FILE *f, const AVPacket *pkt, int dump_payload, const AVStream *st);\n\n\n/**\n * Send a nice dump of a packet to the log.\n *\n * @param avcl A pointer to an arbitrary struct of which the first field is a\n * pointer to an AVClass struct.\n * @param level The importance level of the message, lower values signifying\n * higher importance.\n * @param pkt packet to dump\n * @param dump_payload True if the payload must be displayed, too.\n * @param st AVStream that the packet belongs to\n */\nvoid av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload,\n                      const AVStream *st);\n\n/**\n * Get the AVCodecID for the given codec tag tag.\n * If no codec id is found returns AV_CODEC_ID_NONE.\n *\n * @param tags list of supported codec_id-codec_tag pairs, as stored\n * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag\n * @param tag  codec tag to match to a codec ID\n */\nenum AVCodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned int 
tag);\n\n/**\n * Get the codec tag for the given codec id id.\n * If no codec tag is found returns 0.\n *\n * @param tags list of supported codec_id-codec_tag pairs, as stored\n * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag\n * @param id   codec ID to match to a codec tag\n */\nunsigned int av_codec_get_tag(const struct AVCodecTag * const *tags, enum AVCodecID id);\n\n/**\n * Get the codec tag for the given codec id.\n *\n * @param tags list of supported codec_id - codec_tag pairs, as stored\n * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag\n * @param id codec id that should be searched for in the list\n * @param tag A pointer to the found tag\n * @return 0 if id was not found in tags, > 0 if it was found\n */\nint av_codec_get_tag2(const struct AVCodecTag * const *tags, enum AVCodecID id,\n                      unsigned int *tag);\n\nint av_find_default_stream_index(AVFormatContext *s);\n\n/**\n * Get the index for a specific timestamp.\n *\n * @param st        stream that the timestamp belongs to\n * @param timestamp timestamp to retrieve the index for\n * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond\n *                 to the timestamp which is <= the requested one, if backward\n *                 is 0, then it will be >=\n *              if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise\n * @return < 0 if no such timestamp could be found\n */\nint av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags);\n\n/**\n * Add an index entry into a sorted list. Update the entry if the list\n * already contains it.\n *\n * @param timestamp timestamp in the time base of the given stream\n */\nint av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,\n                       int size, int distance, int flags);\n\n\n/**\n * Split a URL string into components.\n *\n * The pointers to buffers for storing individual components may be null,\n * in order to ignore that component. 
Buffers for components not found are\n * set to empty strings. If the port is not found, it is set to a negative\n * value.\n *\n * @param proto the buffer for the protocol\n * @param proto_size the size of the proto buffer\n * @param authorization the buffer for the authorization\n * @param authorization_size the size of the authorization buffer\n * @param hostname the buffer for the host name\n * @param hostname_size the size of the hostname buffer\n * @param port_ptr a pointer to store the port number in\n * @param path the buffer for the path\n * @param path_size the size of the path buffer\n * @param url the URL to split\n */\nvoid av_url_split(char *proto,         int proto_size,\n                  char *authorization, int authorization_size,\n                  char *hostname,      int hostname_size,\n                  int *port_ptr,\n                  char *path,          int path_size,\n                  const char *url);\n\n\n/**\n * Print detailed information about the input or output format, such as\n * duration, bitrate, streams, container, programs, metadata, side data,\n * codec and time base.\n *\n * @param ic        the context to analyze\n * @param index     index of the stream to dump information about\n * @param url       the URL to print, such as source or destination file\n * @param is_output Select whether the specified context is an input(0) or output(1)\n */\nvoid av_dump_format(AVFormatContext *ic,\n                    int index,\n                    const char *url,\n                    int is_output);\n\n/**\n * Return in 'buf' the path with '%d' replaced by a number.\n *\n * Also handles the '%0nd' format where 'n' is the total number\n * of digits and '%%'.\n *\n * @param buf destination buffer\n * @param buf_size destination buffer size\n * @param path numbered sequence string\n * @param number frame number\n * @return 0 if OK, -1 on format error\n */\nint av_get_frame_filename(char *buf, int buf_size,\n                          const 
char *path, int number);\n\n/**\n * Check whether filename actually is a numbered sequence generator.\n *\n * @param filename possible numbered sequence string\n * @return 1 if a valid numbered sequence string, 0 otherwise\n */\nint av_filename_number_test(const char *filename);\n\n/**\n * Generate an SDP for an RTP session.\n *\n * Note, this overwrites the id values of AVStreams in the muxer contexts\n * for getting unique dynamic payload types.\n *\n * @param ac array of AVFormatContexts describing the RTP streams. If the\n *           array is composed by only one context, such context can contain\n *           multiple AVStreams (one AVStream per RTP stream). Otherwise,\n *           all the contexts in the array (an AVCodecContext per RTP stream)\n *           must contain only one AVStream.\n * @param n_files number of AVCodecContexts contained in ac\n * @param buf buffer where the SDP will be stored (must be allocated by\n *            the caller)\n * @param size the size of the buffer\n * @return 0 if OK, AVERROR_xxx on error\n */\nint av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size);\n\n/**\n * Return a positive value if the given filename has one of the given\n * extensions, 0 otherwise.\n *\n * @param filename   file name to check against the given extensions\n * @param extensions a comma-separated list of filename extensions\n */\nint av_match_ext(const char *filename, const char *extensions);\n\n/**\n * Test if the given container can store a codec.\n *\n * @param ofmt           container to check for compatibility\n * @param codec_id       codec to potentially store in container\n * @param std_compliance standards compliance level, one of FF_COMPLIANCE_*\n *\n * @return 1 if codec with ID codec_id can be stored in ofmt, 0 if it cannot.\n *         A negative number if this information is not available.\n */\nint avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,\n                         int 
std_compliance);\n\n/**\n * @defgroup riff_fourcc RIFF FourCCs\n * @{\n * Get the tables mapping RIFF FourCCs to libavcodec AVCodecIDs. The tables are\n * meant to be passed to av_codec_get_id()/av_codec_get_tag() as in the\n * following code:\n * @code\n * uint32_t tag = MKTAG('H', '2', '6', '4');\n * const struct AVCodecTag *table[] = { avformat_get_riff_video_tags(), 0 };\n * enum AVCodecID id = av_codec_get_id(table, tag);\n * @endcode\n */\n/**\n * @return the table mapping RIFF FourCCs for video to libavcodec AVCodecID.\n */\nconst struct AVCodecTag *avformat_get_riff_video_tags(void);\n/**\n * @return the table mapping RIFF FourCCs for audio to AVCodecID.\n */\nconst struct AVCodecTag *avformat_get_riff_audio_tags(void);\n/**\n * @return the table mapping MOV FourCCs for video to libavcodec AVCodecID.\n */\nconst struct AVCodecTag *avformat_get_mov_video_tags(void);\n/**\n * @return the table mapping MOV FourCCs for audio to AVCodecID.\n */\nconst struct AVCodecTag *avformat_get_mov_audio_tags(void);\n\n/**\n * @}\n */\n\n/**\n * Guess the sample aspect ratio of a frame, based on both the stream and the\n * frame aspect ratio.\n *\n * Since the frame aspect ratio is set by the codec but the stream aspect ratio\n * is set by the demuxer, these two may not be equal. This function tries to\n * return the value that you should use if you would like to display the frame.\n *\n * Basic logic is to use the stream aspect ratio if it is set to something sane\n * otherwise use the frame aspect ratio. 
This way a container setting, which is\n * usually easy to modify can override the coded value in the frames.\n *\n * @param format the format context which the stream is part of\n * @param stream the stream which the frame is part of\n * @param frame the frame with the aspect ratio to be determined\n * @return the guessed (valid) sample_aspect_ratio, 0/1 if no idea\n */\nAVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame);\n\n/**\n * Guess the frame rate, based on both the container and codec information.\n *\n * @param ctx the format context which the stream is part of\n * @param stream the stream which the frame is part of\n * @param frame the frame for which the frame rate should be determined, may be NULL\n * @return the guessed (valid) frame rate, 0/1 if no idea\n */\nAVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame);\n\n/**\n * Check if the stream st contained in s is matched by the stream specifier\n * spec.\n *\n * See the \"stream specifiers\" chapter in the documentation for the syntax\n * of spec.\n *\n * @return  >0 if st is matched by spec;\n *          0  if st is not matched by spec;\n *          AVERROR code if spec is invalid\n *\n * @note  A stream specifier can match several streams in the format.\n */\nint avformat_match_stream_specifier(AVFormatContext *s, AVStream *st,\n                                    const char *spec);\n\nint avformat_queue_attached_pictures(AVFormatContext *s);\n\n\n/**\n * @}\n */\n\n#endif /* AVFORMAT_AVFORMAT_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavformat/avio.h",
    "content": "/*\n * copyright (c) 2001 Fabrice Bellard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n#ifndef AVFORMAT_AVIO_H\n#define AVFORMAT_AVIO_H\n\n/**\n * @file\n * @ingroup lavf_io\n * Buffered I/O operations\n */\n\n#include <stdint.h>\n\n#include \"libavutil/common.h\"\n#include \"libavutil/dict.h\"\n#include \"libavutil/log.h\"\n\n#include \"libavformat/version.h\"\n\n\n#define AVIO_SEEKABLE_NORMAL 0x0001 /**< Seeking works like for a local file */\n\n/**\n * Callback for checking whether to abort blocking functions.\n * AVERROR_EXIT is returned in this case by the interrupted\n * function. During blocking operations, callback is called with\n * opaque as parameter. 
If the callback returns 1, the\n * blocking operation will be aborted.\n *\n * No members can be added to this struct without a major bump, if\n * new elements have been added after this struct in AVFormatContext\n * or AVIOContext.\n */\ntypedef struct AVIOInterruptCB {\n    int (*callback)(void*);\n    void *opaque;\n} AVIOInterruptCB;\n\n/**\n * Bytestream IO Context.\n * New fields can be added to the end with minor version bumps.\n * Removal, reordering and changes to existing fields require a major\n * version bump.\n * sizeof(AVIOContext) must not be used outside libav*.\n *\n * @note None of the function pointers in AVIOContext should be called\n *       directly, they should only be set by the client application\n *       when implementing custom I/O. Normally these are set to the\n *       function pointers specified in avio_alloc_context()\n */\ntypedef struct AVIOContext {\n    /**\n     * A class for private options.\n     *\n     * If this AVIOContext is created by avio_open2(), av_class is set and\n     * passes the options down to protocols.\n     *\n     * If this AVIOContext is manually allocated, then av_class may be set by\n     * the caller.\n     *\n     * warning -- this field can be NULL, be sure to not pass this AVIOContext\n     * to any av_opt_* functions in that case.\n     */\n    const AVClass *av_class;\n    unsigned char *buffer;  /**< Start of the buffer. */\n    int buffer_size;        /**< Maximum buffer size */\n    unsigned char *buf_ptr; /**< Current position in the buffer */\n    unsigned char *buf_end; /**< End of the data, may be less than\n                                 buffer+buffer_size if the read function returned\n                                 less data than requested, e.g. for streams where\n                                 no more data has been received yet. */\n    void *opaque;           /**< A private pointer, passed to the read/write/seek/...\n                                 functions. 
*/\n    int (*read_packet)(void *opaque, uint8_t *buf, int buf_size);\n    int (*write_packet)(void *opaque, uint8_t *buf, int buf_size);\n    int64_t (*seek)(void *opaque, int64_t offset, int whence);\n    int64_t pos;            /**< position in the file of the current buffer */\n    int must_flush;         /**< true if the next seek should flush */\n    int eof_reached;        /**< true if eof reached */\n    int write_flag;         /**< true if open for writing */\n    int max_packet_size;\n    unsigned long checksum;\n    unsigned char *checksum_ptr;\n    unsigned long (*update_checksum)(unsigned long checksum, const uint8_t *buf, unsigned int size);\n    int error;              /**< contains the error code or 0 if no error happened */\n    /**\n     * Pause or resume playback for network streaming protocols - e.g. MMS.\n     */\n    int (*read_pause)(void *opaque, int pause);\n    /**\n     * Seek to a given timestamp in stream with the specified stream_index.\n     * Needed for some network streaming protocols which don't support seeking\n     * to byte position.\n     */\n    int64_t (*read_seek)(void *opaque, int stream_index,\n                         int64_t timestamp, int flags);\n    /**\n     * A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.\n     */\n    int seekable;\n\n    /**\n     * max filesize, used to limit allocations\n     * This field is internal to libavformat and access from outside is not allowed.\n     */\n    int64_t maxsize;\n\n    /**\n     * avio_read and avio_write should if possible be satisfied directly\n     * instead of going through a buffer, and avio_seek will always\n     * call the underlying seek function directly.\n     */\n    int direct;\n\n    /**\n     * Bytes read statistic\n     * This field is internal to libavformat and access from outside is not allowed.\n     */\n    int64_t bytes_read;\n\n    /**\n     * seek statistic\n     * This field is internal to libavformat and access from 
outside is not allowed.\n     */\n    int seek_count;\n\n    /**\n     * writeout statistic\n     * This field is internal to libavformat and access from outside is not allowed.\n     */\n    int writeout_count;\n\n    /**\n     * Original buffer size\n     * used internally after probing and ensure seekback to reset the buffer size\n     * This field is internal to libavformat and access from outside is not allowed.\n     */\n    int orig_buffer_size;\n} AVIOContext;\n\n/* unbuffered I/O */\n\n/**\n * Return the name of the protocol that will handle the passed URL.\n *\n * NULL is returned if no protocol could be found for the given URL.\n *\n * @return Name of the protocol or NULL.\n */\nconst char *avio_find_protocol_name(const char *url);\n\n/**\n * Return AVIO_FLAG_* access flags corresponding to the access permissions\n * of the resource in url, or a negative value corresponding to an\n * AVERROR code in case of failure. The returned access flags are\n * masked by the value in flags.\n *\n * @note This function is intrinsically unsafe, in the sense that the\n * checked resource may change its existence or permission status from\n * one call to another. Thus you should not trust the returned value,\n * unless you are sure that no other processes are accessing the\n * checked resource.\n */\nint avio_check(const char *url, int flags);\n\n/**\n * Allocate and initialize an AVIOContext for buffered I/O. 
It must be later\n * freed with av_free().\n *\n * @param buffer Memory block for input/output operations via AVIOContext.\n *        The buffer must be allocated with av_malloc() and friends.\n *        It may be freed and replaced with a new buffer by libavformat.\n *        AVIOContext.buffer holds the buffer currently in use,\n *        which must be later freed with av_free().\n * @param buffer_size The buffer size is very important for performance.\n *        For protocols with fixed blocksize it should be set to this blocksize.\n *        For others a typical size is a cache page, e.g. 4kb.\n * @param write_flag Set to 1 if the buffer should be writable, 0 otherwise.\n * @param opaque An opaque pointer to user-specific data.\n * @param read_packet  A function for refilling the buffer, may be NULL.\n * @param write_packet A function for writing the buffer contents, may be NULL.\n *        The function may not change the input buffers content.\n * @param seek A function for seeking to specified byte position, may be NULL.\n *\n * @return Allocated AVIOContext or NULL on failure.\n */\nAVIOContext *avio_alloc_context(\n                  unsigned char *buffer,\n                  int buffer_size,\n                  int write_flag,\n                  void *opaque,\n                  int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),\n                  int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),\n                  int64_t (*seek)(void *opaque, int64_t offset, int whence));\n\nvoid avio_w8(AVIOContext *s, int b);\nvoid avio_write(AVIOContext *s, const unsigned char *buf, int size);\nvoid avio_wl64(AVIOContext *s, uint64_t val);\nvoid avio_wb64(AVIOContext *s, uint64_t val);\nvoid avio_wl32(AVIOContext *s, unsigned int val);\nvoid avio_wb32(AVIOContext *s, unsigned int val);\nvoid avio_wl24(AVIOContext *s, unsigned int val);\nvoid avio_wb24(AVIOContext *s, unsigned int val);\nvoid avio_wl16(AVIOContext *s, unsigned int val);\nvoid 
avio_wb16(AVIOContext *s, unsigned int val);\n\n/**\n * Write a NULL-terminated string.\n * @return number of bytes written.\n */\nint avio_put_str(AVIOContext *s, const char *str);\n\n/**\n * Convert an UTF-8 string to UTF-16LE and write it.\n * @return number of bytes written.\n */\nint avio_put_str16le(AVIOContext *s, const char *str);\n\n/**\n * Convert an UTF-8 string to UTF-16BE and write it.\n * @return number of bytes written.\n */\nint avio_put_str16be(AVIOContext *s, const char *str);\n\n/**\n * Passing this as the \"whence\" parameter to a seek function causes it to\n * return the filesize without seeking anywhere. Supporting this is optional.\n * If it is not supported then the seek function will return <0.\n */\n#define AVSEEK_SIZE 0x10000\n\n/**\n * Oring this flag as into the \"whence\" parameter to a seek function causes it to\n * seek by any means (like reopening and linear reading) or other normally unreasonable\n * means that can be extremely slow.\n * This may be ignored by the seek code.\n */\n#define AVSEEK_FORCE 0x20000\n\n/**\n * fseek() equivalent for AVIOContext.\n * @return new position or AVERROR.\n */\nint64_t avio_seek(AVIOContext *s, int64_t offset, int whence);\n\n/**\n * Skip given number of bytes forward\n * @return new position or AVERROR.\n */\nint64_t avio_skip(AVIOContext *s, int64_t offset);\n\n/**\n * ftell() equivalent for AVIOContext.\n * @return position or AVERROR.\n */\nstatic av_always_inline int64_t avio_tell(AVIOContext *s)\n{\n    return avio_seek(s, 0, SEEK_CUR);\n}\n\n/**\n * Get the filesize.\n * @return filesize or AVERROR\n */\nint64_t avio_size(AVIOContext *s);\n\n/**\n * feof() equivalent for AVIOContext.\n * @return non zero if and only if end of file\n */\nint avio_feof(AVIOContext *s);\n#if FF_API_URL_FEOF\n/**\n * @deprecated use avio_feof()\n */\nattribute_deprecated\nint url_feof(AVIOContext *s);\n#endif\n\n/** @warning currently size is limited */\nint avio_printf(AVIOContext *s, const char *fmt, ...) 
av_printf_format(2, 3);\n\n/**\n * Force flushing of buffered data.\n *\n * For write streams, force the buffered data to be immediately written to the output,\n * without to wait to fill the internal buffer.\n *\n * For read streams, discard all currently buffered data, and advance the\n * reported file position to that of the underlying stream. This does not\n * read new data, and does not perform any seeks.\n */\nvoid avio_flush(AVIOContext *s);\n\n/**\n * Read size bytes from AVIOContext into buf.\n * @return number of bytes read or AVERROR\n */\nint avio_read(AVIOContext *s, unsigned char *buf, int size);\n\n/**\n * @name Functions for reading from AVIOContext\n * @{\n *\n * @note return 0 if EOF, so you cannot use it if EOF handling is\n *       necessary\n */\nint          avio_r8  (AVIOContext *s);\nunsigned int avio_rl16(AVIOContext *s);\nunsigned int avio_rl24(AVIOContext *s);\nunsigned int avio_rl32(AVIOContext *s);\nuint64_t     avio_rl64(AVIOContext *s);\nunsigned int avio_rb16(AVIOContext *s);\nunsigned int avio_rb24(AVIOContext *s);\nunsigned int avio_rb32(AVIOContext *s);\nuint64_t     avio_rb64(AVIOContext *s);\n/**\n * @}\n */\n\n/**\n * Read a string from pb into buf. The reading will terminate when either\n * a NULL character was encountered, maxlen bytes have been read, or nothing\n * more can be read from pb. 
The result is guaranteed to be NULL-terminated, it\n * will be truncated if buf is too small.\n * Note that the string is not interpreted or validated in any way, it\n * might get truncated in the middle of a sequence for multi-byte encodings.\n *\n * @return number of bytes read (is always <= maxlen).\n * If reading ends on EOF or error, the return value will be one more than\n * bytes actually read.\n */\nint avio_get_str(AVIOContext *pb, int maxlen, char *buf, int buflen);\n\n/**\n * Read a UTF-16 string from pb and convert it to UTF-8.\n * The reading will terminate when either a null or invalid character was\n * encountered or maxlen bytes have been read.\n * @return number of bytes read (is always <= maxlen)\n */\nint avio_get_str16le(AVIOContext *pb, int maxlen, char *buf, int buflen);\nint avio_get_str16be(AVIOContext *pb, int maxlen, char *buf, int buflen);\n\n\n/**\n * @name URL open modes\n * The flags argument to avio_open must be one of the following\n * constants, optionally ORed with other flags.\n * @{\n */\n#define AVIO_FLAG_READ  1                                      /**< read-only */\n#define AVIO_FLAG_WRITE 2                                      /**< write-only */\n#define AVIO_FLAG_READ_WRITE (AVIO_FLAG_READ|AVIO_FLAG_WRITE)  /**< read-write pseudo flag */\n/**\n * @}\n */\n\n/**\n * Use non-blocking mode.\n * If this flag is set, operations on the context will return\n * AVERROR(EAGAIN) if they can not be performed immediately.\n * If this flag is not set, operations on the context will never return\n * AVERROR(EAGAIN).\n * Note that this flag does not affect the opening/connecting of the\n * context. Connecting a protocol will always block if necessary (e.g. on\n * network protocols) but never hang (e.g. 
on busy devices).\n * Warning: non-blocking protocols is work-in-progress; this flag may be\n * silently ignored.\n */\n#define AVIO_FLAG_NONBLOCK 8\n\n/**\n * Use direct mode.\n * avio_read and avio_write should if possible be satisfied directly\n * instead of going through a buffer, and avio_seek will always\n * call the underlying seek function directly.\n */\n#define AVIO_FLAG_DIRECT 0x8000\n\n/**\n * Create and initialize a AVIOContext for accessing the\n * resource indicated by url.\n * @note When the resource indicated by url has been opened in\n * read+write mode, the AVIOContext can be used only for writing.\n *\n * @param s Used to return the pointer to the created AVIOContext.\n * In case of failure the pointed to value is set to NULL.\n * @param url resource to access\n * @param flags flags which control how the resource indicated by url\n * is to be opened\n * @return >= 0 in case of success, a negative value corresponding to an\n * AVERROR code in case of failure\n */\nint avio_open(AVIOContext **s, const char *url, int flags);\n\n/**\n * Create and initialize a AVIOContext for accessing the\n * resource indicated by url.\n * @note When the resource indicated by url has been opened in\n * read+write mode, the AVIOContext can be used only for writing.\n *\n * @param s Used to return the pointer to the created AVIOContext.\n * In case of failure the pointed to value is set to NULL.\n * @param url resource to access\n * @param flags flags which control how the resource indicated by url\n * is to be opened\n * @param int_cb an interrupt callback to be used at the protocols level\n * @param options  A dictionary filled with protocol-private options. On return\n * this parameter will be destroyed and replaced with a dict containing options\n * that were not found. 
May be NULL.\n * @return >= 0 in case of success, a negative value corresponding to an\n * AVERROR code in case of failure\n */\nint avio_open2(AVIOContext **s, const char *url, int flags,\n               const AVIOInterruptCB *int_cb, AVDictionary **options);\n\n/**\n * Close the resource accessed by the AVIOContext s and free it.\n * This function can only be used if s was opened by avio_open().\n *\n * The internal buffer is automatically flushed before closing the\n * resource.\n *\n * @return 0 on success, an AVERROR < 0 on error.\n * @see avio_closep\n */\nint avio_close(AVIOContext *s);\n\n/**\n * Close the resource accessed by the AVIOContext *s, free it\n * and set the pointer pointing to it to NULL.\n * This function can only be used if s was opened by avio_open().\n *\n * The internal buffer is automatically flushed before closing the\n * resource.\n *\n * @return 0 on success, an AVERROR < 0 on error.\n * @see avio_close\n */\nint avio_closep(AVIOContext **s);\n\n\n/**\n * Open a write only memory stream.\n *\n * @param s new IO context\n * @return zero if no error.\n */\nint avio_open_dyn_buf(AVIOContext **s);\n\n/**\n * Return the written size and a pointer to the buffer. 
The buffer\n * must be freed with av_free().\n * Padding of FF_INPUT_BUFFER_PADDING_SIZE is added to the buffer.\n *\n * @param s IO context\n * @param pbuffer pointer to a byte buffer\n * @return the length of the byte buffer\n */\nint avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer);\n\n/**\n * Iterate through names of available protocols.\n *\n * @param opaque A private pointer representing current protocol.\n *        It must be a pointer to NULL on first iteration and will\n *        be updated by successive calls to avio_enum_protocols.\n * @param output If set to 1, iterate over output protocols,\n *               otherwise over input protocols.\n *\n * @return A static string containing the name of current protocol or NULL\n */\nconst char *avio_enum_protocols(void **opaque, int output);\n\n/**\n * Pause and resume playing - only meaningful if using a network streaming\n * protocol (e.g. MMS).\n *\n * @param h     IO context from which to call the read_pause function pointer\n * @param pause 1 for pause, 0 for resume\n */\nint     avio_pause(AVIOContext *h, int pause);\n\n/**\n * Seek to a given timestamp relative to some component stream.\n * Only meaningful if using a network streaming protocol (e.g. MMS.).\n *\n * @param h IO context from which to call the seek function pointers\n * @param stream_index The stream index that the timestamp is relative to.\n *        If stream_index is (-1) the timestamp should be in AV_TIME_BASE\n *        units from the beginning of the presentation.\n *        If a stream_index >= 0 is used and the protocol does not support\n *        seeking based on component streams, the call will fail.\n * @param timestamp timestamp in AVStream.time_base units\n *        or if there is no stream specified then in AV_TIME_BASE units.\n * @param flags Optional combination of AVSEEK_FLAG_BACKWARD, AVSEEK_FLAG_BYTE\n *        and AVSEEK_FLAG_ANY. 
The protocol may silently ignore\n *        AVSEEK_FLAG_BACKWARD and AVSEEK_FLAG_ANY, but AVSEEK_FLAG_BYTE will\n *        fail if used and not supported.\n * @return >= 0 on success\n * @see AVInputFormat::read_seek\n */\nint64_t avio_seek_time(AVIOContext *h, int stream_index,\n                       int64_t timestamp, int flags);\n\n/* Avoid a warning. The header can not be included because it breaks c++. */\nstruct AVBPrint;\n\n/**\n * Read contents of h into print buffer, up to max_size bytes, or up to EOF.\n *\n * @return 0 for success (max_size bytes read or EOF reached), negative error\n * code otherwise\n */\nint avio_read_to_bprint(AVIOContext *h, struct AVBPrint *pb, size_t max_size);\n\n#endif /* AVFORMAT_AVIO_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavformat/version.h",
    "content": "/*\n * Version macros.\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVFORMAT_VERSION_H\n#define AVFORMAT_VERSION_H\n\n/**\n * @file\n * @ingroup libavf\n * Libavformat version macros\n */\n\n#include \"libavutil/version.h\"\n\n#define LIBAVFORMAT_VERSION_MAJOR 56\n#define LIBAVFORMAT_VERSION_MINOR  25\n#define LIBAVFORMAT_VERSION_MICRO 101\n\n#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \\\n                                               LIBAVFORMAT_VERSION_MINOR, \\\n                                               LIBAVFORMAT_VERSION_MICRO)\n#define LIBAVFORMAT_VERSION     AV_VERSION(LIBAVFORMAT_VERSION_MAJOR,   \\\n                                           LIBAVFORMAT_VERSION_MINOR,   \\\n                                           LIBAVFORMAT_VERSION_MICRO)\n#define LIBAVFORMAT_BUILD       LIBAVFORMAT_VERSION_INT\n\n#define LIBAVFORMAT_IDENT       \"Lavf\" AV_STRINGIFY(LIBAVFORMAT_VERSION)\n\n/**\n * FF_API_* defines may be placed below to indicate public API that will be\n * dropped at a future version bump. 
The defines themselves are not part of\n * the public API and may change, break or disappear at any time.\n */\n#ifndef FF_API_LAVF_BITEXACT\n#define FF_API_LAVF_BITEXACT            (LIBAVFORMAT_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_LAVF_FRAC\n#define FF_API_LAVF_FRAC                (LIBAVFORMAT_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_LAVF_CODEC_TB\n#define FF_API_LAVF_CODEC_TB            (LIBAVFORMAT_VERSION_MAJOR < 57)\n#endif\n#ifndef FF_API_URL_FEOF\n#define FF_API_URL_FEOF                 (LIBAVFORMAT_VERSION_MAJOR < 57)\n#endif\n\n#ifndef FF_API_R_FRAME_RATE\n#define FF_API_R_FRAME_RATE            1\n#endif\n#endif /* AVFORMAT_VERSION_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/adler32.h",
    "content": "/*\n * copyright (c) 2006 Mans Rullgard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_ADLER32_H\n#define AVUTIL_ADLER32_H\n\n#include <stdint.h>\n#include \"attributes.h\"\n\n/**\n * @file\n * Public header for libavutil Adler32 hasher\n *\n * @defgroup lavu_adler32 Adler32\n * @ingroup lavu_crypto\n * @{\n */\n\n/**\n * Calculate the Adler32 checksum of a buffer.\n *\n * Passing the return value to a subsequent av_adler32_update() call\n * allows the checksum of multiple buffers to be calculated as though\n * they were concatenated.\n *\n * @param adler initial checksum value\n * @param buf   pointer to input buffer\n * @param len   size of input buffer\n * @return      updated checksum\n */\nunsigned long av_adler32_update(unsigned long adler, const uint8_t *buf,\n                                unsigned int len) av_pure;\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_ADLER32_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/aes.h",
    "content": "/*\n * copyright (c) 2007 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_AES_H\n#define AVUTIL_AES_H\n\n#include <stdint.h>\n\n#include \"attributes.h\"\n#include \"version.h\"\n\n/**\n * @defgroup lavu_aes AES\n * @ingroup lavu_crypto\n * @{\n */\n\nextern const int av_aes_size;\n\nstruct AVAES;\n\n/**\n * Allocate an AVAES context.\n */\nstruct AVAES *av_aes_alloc(void);\n\n/**\n * Initialize an AVAES context.\n * @param key_bits 128, 192 or 256\n * @param decrypt 0 for encryption, 1 for decryption\n */\nint av_aes_init(struct AVAES *a, const uint8_t *key, int key_bits, int decrypt);\n\n/**\n * Encrypt or decrypt a buffer using a previously initialized context.\n * @param count number of 16 byte blocks\n * @param dst destination array, can be equal to src\n * @param src source array, can be equal to dst\n * @param iv initialization vector for CBC mode, if NULL then ECB will be used\n * @param decrypt 0 for encryption, 1 for decryption\n */\nvoid av_aes_crypt(struct AVAES *a, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_AES_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/attributes.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * Macro definitions for various function/variable attributes\n */\n\n#ifndef AVUTIL_ATTRIBUTES_H\n#define AVUTIL_ATTRIBUTES_H\n\n#ifdef __GNUC__\n#    define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y))\n#else\n#    define AV_GCC_VERSION_AT_LEAST(x,y) 0\n#endif\n\n#ifndef av_always_inline\n#if AV_GCC_VERSION_AT_LEAST(3,1)\n#    define av_always_inline __attribute__((always_inline)) inline\n#elif defined(_MSC_VER)\n#    define av_always_inline __forceinline\n#else\n#    define av_always_inline inline\n#endif\n#endif\n\n#ifndef av_extern_inline\n#if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__)\n#    define av_extern_inline extern inline\n#else\n#    define av_extern_inline inline\n#endif\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(3,1)\n#    define av_noinline __attribute__((noinline))\n#elif defined(_MSC_VER)\n#    define av_noinline __declspec(noinline)\n#else\n#    define av_noinline\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(3,1)\n#    define av_pure __attribute__((pure))\n#else\n#    define av_pure\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(2,6)\n# 
   define av_const __attribute__((const))\n#else\n#    define av_const\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(4,3)\n#    define av_cold __attribute__((cold))\n#else\n#    define av_cold\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(4,1) && !defined(__llvm__)\n#    define av_flatten __attribute__((flatten))\n#else\n#    define av_flatten\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(3,1)\n#    define attribute_deprecated __attribute__((deprecated))\n#elif defined(_MSC_VER)\n#    define attribute_deprecated __declspec(deprecated)\n#else\n#    define attribute_deprecated\n#endif\n\n/**\n * Disable warnings about deprecated features\n * This is useful for sections of code kept for backward compatibility and\n * scheduled for removal.\n */\n#ifndef AV_NOWARN_DEPRECATED\n#if AV_GCC_VERSION_AT_LEAST(4,6)\n#    define AV_NOWARN_DEPRECATED(code) \\\n        _Pragma(\"GCC diagnostic push\") \\\n        _Pragma(\"GCC diagnostic ignored \\\"-Wdeprecated-declarations\\\"\") \\\n        code \\\n        _Pragma(\"GCC diagnostic pop\")\n#elif defined(_MSC_VER)\n#    define AV_NOWARN_DEPRECATED(code) \\\n        __pragma(warning(push)) \\\n        __pragma(warning(disable : 4996)) \\\n        code; \\\n        __pragma(warning(pop))\n#else\n#    define AV_NOWARN_DEPRECATED(code) code\n#endif\n#endif\n\n\n#if defined(__GNUC__)\n#    define av_unused __attribute__((unused))\n#else\n#    define av_unused\n#endif\n\n/**\n * Mark a variable as used and prevent the compiler from optimizing it\n * away.  
This is useful for variables accessed only from inline\n * assembler without the compiler being aware.\n */\n#if AV_GCC_VERSION_AT_LEAST(3,1)\n#    define av_used __attribute__((used))\n#else\n#    define av_used\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(3,3)\n#   define av_alias __attribute__((may_alias))\n#else\n#   define av_alias\n#endif\n\n#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__)\n#    define av_uninit(x) x=x\n#else\n#    define av_uninit(x) x\n#endif\n\n#ifdef __GNUC__\n#    define av_builtin_constant_p __builtin_constant_p\n#    define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos)))\n#else\n#    define av_builtin_constant_p(x) 0\n#    define av_printf_format(fmtpos, attrpos)\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(2,5)\n#    define av_noreturn __attribute__((noreturn))\n#else\n#    define av_noreturn\n#endif\n\n#endif /* AVUTIL_ATTRIBUTES_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/audio_fifo.h",
    "content": "/*\n * Audio FIFO\n * Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * Audio FIFO Buffer\n */\n\n#ifndef AVUTIL_AUDIO_FIFO_H\n#define AVUTIL_AUDIO_FIFO_H\n\n#include \"avutil.h\"\n#include \"fifo.h\"\n#include \"samplefmt.h\"\n\n/**\n * @addtogroup lavu_audio\n * @{\n *\n * @defgroup lavu_audiofifo Audio FIFO Buffer\n * @{\n */\n\n/**\n * Context for an Audio FIFO Buffer.\n *\n * - Operates at the sample level rather than the byte level.\n * - Supports multiple channels with either planar or packed sample format.\n * - Automatic reallocation when writing to a full buffer.\n */\ntypedef struct AVAudioFifo AVAudioFifo;\n\n/**\n * Free an AVAudioFifo.\n *\n * @param af  AVAudioFifo to free\n */\nvoid av_audio_fifo_free(AVAudioFifo *af);\n\n/**\n * Allocate an AVAudioFifo.\n *\n * @param sample_fmt  sample format\n * @param channels    number of channels\n * @param nb_samples  initial allocation size, in samples\n * @return            newly allocated AVAudioFifo, or NULL on error\n */\nAVAudioFifo *av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels,\n                                 int nb_samples);\n\n/**\n * Reallocate an AVAudioFifo.\n *\n 
* @param af          AVAudioFifo to reallocate\n * @param nb_samples  new allocation size, in samples\n * @return            0 if OK, or negative AVERROR code on failure\n */\nint av_audio_fifo_realloc(AVAudioFifo *af, int nb_samples);\n\n/**\n * Write data to an AVAudioFifo.\n *\n * The AVAudioFifo will be reallocated automatically if the available space\n * is less than nb_samples.\n *\n * @see enum AVSampleFormat\n * The documentation for AVSampleFormat describes the data layout.\n *\n * @param af          AVAudioFifo to write to\n * @param data        audio data plane pointers\n * @param nb_samples  number of samples to write\n * @return            number of samples actually written, or negative AVERROR\n *                    code on failure. If successful, the number of samples\n *                    actually written will always be nb_samples.\n */\nint av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples);\n\n/**\n * Read data from an AVAudioFifo.\n *\n * @see enum AVSampleFormat\n * The documentation for AVSampleFormat describes the data layout.\n *\n * @param af          AVAudioFifo to read from\n * @param data        audio data plane pointers\n * @param nb_samples  number of samples to read\n * @return            number of samples actually read, or negative AVERROR code\n *                    on failure. 
The number of samples actually read will not\n *                    be greater than nb_samples, and will only be less than\n *                    nb_samples if av_audio_fifo_size is less than nb_samples.\n */\nint av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples);\n\n/**\n * Drain data from an AVAudioFifo.\n *\n * Removes the data without reading it.\n *\n * @param af          AVAudioFifo to drain\n * @param nb_samples  number of samples to drain\n * @return            0 if OK, or negative AVERROR code on failure\n */\nint av_audio_fifo_drain(AVAudioFifo *af, int nb_samples);\n\n/**\n * Reset the AVAudioFifo buffer.\n *\n * This empties all data in the buffer.\n *\n * @param af  AVAudioFifo to reset\n */\nvoid av_audio_fifo_reset(AVAudioFifo *af);\n\n/**\n * Get the current number of samples in the AVAudioFifo available for reading.\n *\n * @param af  the AVAudioFifo to query\n * @return    number of samples available for reading\n */\nint av_audio_fifo_size(AVAudioFifo *af);\n\n/**\n * Get the current number of samples in the AVAudioFifo available for writing.\n *\n * @param af  the AVAudioFifo to query\n * @return    number of samples available for writing\n */\nint av_audio_fifo_space(AVAudioFifo *af);\n\n/**\n * @}\n * @}\n */\n\n#endif /* AVUTIL_AUDIO_FIFO_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/audioconvert.h",
    "content": "\n#include \"version.h\"\n\n#if FF_API_AUDIOCONVERT\n#include \"channel_layout.h\"\n#endif\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/avassert.h",
    "content": "/*\n * copyright (c) 2010 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * simple assert() macros that are a bit more flexible than ISO C assert().\n * @author Michael Niedermayer <michaelni@gmx.at>\n */\n\n#ifndef AVUTIL_AVASSERT_H\n#define AVUTIL_AVASSERT_H\n\n#include <stdlib.h>\n#include \"avutil.h\"\n#include \"log.h\"\n\n/**\n * assert() equivalent, that is always enabled.\n */\n#define av_assert0(cond) do {                                           \\\n    if (!(cond)) {                                                      \\\n        av_log(NULL, AV_LOG_PANIC, \"Assertion %s failed at %s:%d\\n\",    \\\n               AV_STRINGIFY(cond), __FILE__, __LINE__);                 \\\n        abort();                                                        \\\n    }                                                                   \\\n} while (0)\n\n\n/**\n * assert() equivalent, that does not lie in speed critical code.\n * These asserts() thus can be enabled without fearing speedloss.\n */\n#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 0\n#define av_assert1(cond) av_assert0(cond)\n#else\n#define av_assert1(cond) ((void)0)\n#endif\n\n\n/**\n * assert() equivalent, that does 
lie in speed critical code.\n */\n#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1\n#define av_assert2(cond) av_assert0(cond)\n#else\n#define av_assert2(cond) ((void)0)\n#endif\n\n#endif /* AVUTIL_AVASSERT_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/avconfig.h",
    "content": "/* Generated by ffconf */\n#ifndef AVUTIL_AVCONFIG_H\n#define AVUTIL_AVCONFIG_H\n#define AV_HAVE_BIGENDIAN 0\n#define AV_HAVE_FAST_UNALIGNED 1\n#define AV_HAVE_INCOMPATIBLE_LIBAV_ABI 0\n#endif /* AVUTIL_AVCONFIG_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/avstring.h",
    "content": "/*\n * Copyright (c) 2007 Mans Rullgard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_AVSTRING_H\n#define AVUTIL_AVSTRING_H\n\n#include <stddef.h>\n#include <stdint.h>\n#include \"attributes.h\"\n\n/**\n * @addtogroup lavu_string\n * @{\n */\n\n/**\n * Return non-zero if pfx is a prefix of str. If it is, *ptr is set to\n * the address of the first character in str after the prefix.\n *\n * @param str input string\n * @param pfx prefix to test\n * @param ptr updated if the prefix is matched inside str\n * @return non-zero if the prefix matches, zero otherwise\n */\nint av_strstart(const char *str, const char *pfx, const char **ptr);\n\n/**\n * Return non-zero if pfx is a prefix of str independent of case. If\n * it is, *ptr is set to the address of the first character in str\n * after the prefix.\n *\n * @param str input string\n * @param pfx prefix to test\n * @param ptr updated if the prefix is matched inside str\n * @return non-zero if the prefix matches, zero otherwise\n */\nint av_stristart(const char *str, const char *pfx, const char **ptr);\n\n/**\n * Locate the first case-independent occurrence in the string haystack\n * of the string needle.  
A zero-length string needle is considered to\n * match at the start of haystack.\n *\n * This function is a case-insensitive version of the standard strstr().\n *\n * @param haystack string to search in\n * @param needle   string to search for\n * @return         pointer to the located match within haystack\n *                 or a null pointer if no match\n */\nchar *av_stristr(const char *haystack, const char *needle);\n\n/**\n * Locate the first occurrence of the string needle in the string haystack\n * where not more than hay_length characters are searched. A zero-length\n * string needle is considered to match at the start of haystack.\n *\n * This function is a length-limited version of the standard strstr().\n *\n * @param haystack   string to search in\n * @param needle     string to search for\n * @param hay_length length of string to search in\n * @return           pointer to the located match within haystack\n *                   or a null pointer if no match\n */\nchar *av_strnstr(const char *haystack, const char *needle, size_t hay_length);\n\n/**\n * Copy the string src to dst, but no more than size - 1 bytes, and\n * null-terminate dst.\n *\n * This function is the same as BSD strlcpy().\n *\n * @param dst destination buffer\n * @param src source string\n * @param size size of destination buffer\n * @return the length of src\n *\n * @warning since the return value is the length of src, src absolutely\n * _must_ be a properly 0-terminated string, otherwise this will read beyond\n * the end of the buffer and possibly crash.\n */\nsize_t av_strlcpy(char *dst, const char *src, size_t size);\n\n/**\n * Append the string src to the string dst, but to a total length of\n * no more than size - 1 bytes, and null-terminate dst.\n *\n * This function is similar to BSD strlcat(), but differs when\n * size <= strlen(dst).\n *\n * @param dst destination buffer\n * @param src source string\n * @param size size of destination buffer\n * @return the total length of 
src and dst\n *\n * @warning since the return value use the length of src and dst, these\n * absolutely _must_ be a properly 0-terminated strings, otherwise this\n * will read beyond the end of the buffer and possibly crash.\n */\nsize_t av_strlcat(char *dst, const char *src, size_t size);\n\n/**\n * Append output to a string, according to a format. Never write out of\n * the destination buffer, and always put a terminating 0 within\n * the buffer.\n * @param dst destination buffer (string to which the output is\n *  appended)\n * @param size total size of the destination buffer\n * @param fmt printf-compatible format string, specifying how the\n *  following parameters are used\n * @return the length of the string that would have been generated\n *  if enough space had been available\n */\nsize_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4);\n\n/**\n * Get the count of continuous non zero chars starting from the beginning.\n *\n * @param len maximum number of characters to check in the string, that\n *            is the maximum value which is returned by the function\n */\nstatic inline size_t av_strnlen(const char *s, size_t len)\n{\n    size_t i;\n    for (i = 0; i < len && s[i]; i++)\n        ;\n    return i;\n}\n\n/**\n * Print arguments following specified format into a large enough auto\n * allocated buffer. It is similar to GNU asprintf().\n * @param fmt printf-compatible format string, specifying how the\n *            following parameters are used.\n * @return the allocated string\n * @note You have to free the string yourself with av_free().\n */\nchar *av_asprintf(const char *fmt, ...) av_printf_format(1, 2);\n\n/**\n * Convert a number to a av_malloced string.\n */\nchar *av_d2str(double d);\n\n/**\n * Unescape the given string until a non escaped terminating char,\n * and return the token corresponding to the unescaped string.\n *\n * The normal \\ and ' escaping is supported. 
Leading and trailing\n * whitespaces are removed, unless they are escaped with '\\' or are\n * enclosed between ''.\n *\n * @param buf the buffer to parse, buf will be updated to point to the\n * terminating char\n * @param term a 0-terminated list of terminating chars\n * @return the malloced unescaped string, which must be av_freed by\n * the user, NULL in case of allocation failure\n */\nchar *av_get_token(const char **buf, const char *term);\n\n/**\n * Split the string into several tokens which can be accessed by\n * successive calls to av_strtok().\n *\n * A token is defined as a sequence of characters not belonging to the\n * set specified in delim.\n *\n * On the first call to av_strtok(), s should point to the string to\n * parse, and the value of saveptr is ignored. In subsequent calls, s\n * should be NULL, and saveptr should be unchanged since the previous\n * call.\n *\n * This function is similar to strtok_r() defined in POSIX.1.\n *\n * @param s the string to parse, may be NULL\n * @param delim 0-terminated list of token delimiters, must be non-NULL\n * @param saveptr user-provided pointer which points to stored\n * information necessary for av_strtok() to continue scanning the same\n * string. 
saveptr is updated to point to the next character after the\n * first delimiter found, or to NULL if the string was terminated\n * @return the found token, or NULL when no token is found\n */\nchar *av_strtok(char *s, const char *delim, char **saveptr);\n\n/**\n * Locale-independent conversion of ASCII isdigit.\n */\nav_const int av_isdigit(int c);\n\n/**\n * Locale-independent conversion of ASCII isgraph.\n */\nav_const int av_isgraph(int c);\n\n/**\n * Locale-independent conversion of ASCII isspace.\n */\nav_const int av_isspace(int c);\n\n/**\n * Locale-independent conversion of ASCII characters to uppercase.\n */\nstatic inline av_const int av_toupper(int c)\n{\n    if (c >= 'a' && c <= 'z')\n        c ^= 0x20;\n    return c;\n}\n\n/**\n * Locale-independent conversion of ASCII characters to lowercase.\n */\nstatic inline av_const int av_tolower(int c)\n{\n    if (c >= 'A' && c <= 'Z')\n        c ^= 0x20;\n    return c;\n}\n\n/**\n * Locale-independent conversion of ASCII isxdigit.\n */\nav_const int av_isxdigit(int c);\n\n/**\n * Locale-independent case-insensitive compare.\n * @note This means only ASCII-range characters are case-insensitive\n */\nint av_strcasecmp(const char *a, const char *b);\n\n/**\n * Locale-independent case-insensitive compare.\n * @note This means only ASCII-range characters are case-insensitive\n */\nint av_strncasecmp(const char *a, const char *b, size_t n);\n\n\n/**\n * Thread safe basename.\n * @param path the path, on DOS both \\ and / are considered separators.\n * @return pointer to the basename substring.\n */\nconst char *av_basename(const char *path);\n\n/**\n * Thread safe dirname.\n * @param path the path, on DOS both \\ and / are considered separators.\n * @return the path with the separator replaced by the string terminator or \".\".\n * @note the function may change the input string.\n */\nconst char *av_dirname(char *path);\n\n/**\n * Match instances of a name in a comma-separated list of names.\n * @param name  Name to 
look for.\n * @param names List of names.\n * @return 1 on match, 0 otherwise.\n */\nint av_match_name(const char *name, const char *names);\n\nenum AVEscapeMode {\n    AV_ESCAPE_MODE_AUTO,      ///< Use auto-selected escaping mode.\n    AV_ESCAPE_MODE_BACKSLASH, ///< Use backslash escaping.\n    AV_ESCAPE_MODE_QUOTE,     ///< Use single-quote escaping.\n};\n\n/**\n * Consider spaces special and escape them even in the middle of the\n * string.\n *\n * This is equivalent to adding the whitespace characters to the special\n * characters lists, except it is guaranteed to use the exact same list\n * of whitespace characters as the rest of libavutil.\n */\n#define AV_ESCAPE_FLAG_WHITESPACE 0x01\n\n/**\n * Escape only specified special characters.\n * Without this flag, escape also any characters that may be considered\n * special by av_get_token(), such as the single quote.\n */\n#define AV_ESCAPE_FLAG_STRICT 0x02\n\n/**\n * Escape string in src, and put the escaped string in an allocated\n * string in *dst, which must be freed with av_free().\n *\n * @param dst           pointer where an allocated string is put\n * @param src           string to escape, must be non-NULL\n * @param special_chars string containing the special characters which\n *                      need to be escaped, can be NULL\n * @param mode          escape mode to employ, see AV_ESCAPE_MODE_* macros.\n *                      Any unknown value for mode will be considered equivalent to\n *                      AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without\n *                      notice.\n * @param flags         flags which control how to escape, see AV_ESCAPE_FLAG_ macros\n * @return the length of the allocated string, or a negative error code in case of error\n * @see av_bprint_escape()\n */\nint av_escape(char **dst, const char *src, const char *special_chars,\n              enum AVEscapeMode mode, int flags);\n\n#define AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES          1 ///< 
accept codepoints over 0x10FFFF\n#define AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS             2 ///< accept non-characters - 0xFFFE and 0xFFFF\n#define AV_UTF8_FLAG_ACCEPT_SURROGATES                 4 ///< accept UTF-16 surrogates codes\n#define AV_UTF8_FLAG_EXCLUDE_XML_INVALID_CONTROL_CODES 8 ///< exclude control codes not accepted by XML\n\n#define AV_UTF8_FLAG_ACCEPT_ALL \\\n    AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES|AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS|AV_UTF8_FLAG_ACCEPT_SURROGATES\n\n/**\n * Read and decode a single UTF-8 code point (character) from the\n * buffer in *buf, and update *buf to point to the next byte to\n * decode.\n *\n * In case of an invalid byte sequence, the pointer will be updated to\n * the next byte after the invalid sequence and the function will\n * return an error code.\n *\n * Depending on the specified flags, the function will also fail in\n * case the decoded code point does not belong to a valid range.\n *\n * @note For speed-relevant code a carefully implemented use of\n * GET_UTF8() may be preferred.\n *\n * @param codep   pointer used to return the parsed code in case of success.\n *                The value in *codep is set even in case the range check fails.\n * @param bufp    pointer to the address the first byte of the sequence\n *                to decode, updated by the function to point to the\n *                byte next after the decoded sequence\n * @param buf_end pointer to the end of the buffer, points to the next\n *                byte past the last in the buffer. 
This is used to\n *                avoid buffer overreads (in case of an unfinished\n *                UTF-8 sequence towards the end of the buffer).\n * @param flags   a collection of AV_UTF8_FLAG_* flags\n * @return >= 0 in case a sequence was successfully read, a negative\n * value in case of invalid sequence\n */\nint av_utf8_decode(int32_t *codep, const uint8_t **bufp, const uint8_t *buf_end,\n                   unsigned int flags);\n\n/**\n * Check if a name is in a list.\n * @returns 0 if not found, or the 1 based index where it has been found in the\n *            list.\n */\nint av_match_list(const char *name, const char *list, char separator);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_AVSTRING_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/avutil.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_AVUTIL_H\n#define AVUTIL_AVUTIL_H\n\n/**\n * @file\n * external API header\n */\n\n/**\n * @mainpage\n *\n * @section ffmpeg_intro Introduction\n *\n * This document describes the usage of the different libraries\n * provided by FFmpeg.\n *\n * @li @ref libavc \"libavcodec\" encoding/decoding library\n * @li @ref lavfi \"libavfilter\" graph-based frame editing library\n * @li @ref libavf \"libavformat\" I/O and muxing/demuxing library\n * @li @ref lavd \"libavdevice\" special devices muxing/demuxing library\n * @li @ref lavu \"libavutil\" common utility library\n * @li @ref lswr \"libswresample\" audio resampling, format conversion and mixing\n * @li @ref lpp  \"libpostproc\" post processing library\n * @li @ref libsws \"libswscale\" color conversion and scaling library\n *\n * @section ffmpeg_versioning Versioning and compatibility\n *\n * Each of the FFmpeg libraries contains a version.h header, which defines a\n * major, minor and micro version number with the\n * <em>LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO}</em> macros. 
The major version\n * number is incremented with backward incompatible changes - e.g. removing\n * parts of the public API, reordering public struct members, etc. The minor\n * version number is incremented for backward compatible API changes or major\n * new features - e.g. adding a new public function or a new decoder. The micro\n * version number is incremented for smaller changes that a calling program\n * might still want to check for - e.g. changing behavior in a previously\n * unspecified situation.\n *\n * FFmpeg guarantees backward API and ABI compatibility for each library as long\n * as its major version number is unchanged. This means that no public symbols\n * will be removed or renamed. Types and names of the public struct members and\n * values of public macros and enums will remain the same (unless they were\n * explicitly declared as not part of the public API). Documented behavior will\n * not change.\n *\n * In other words, any correct program that works with a given FFmpeg snapshot\n * should work just as well without any changes with any later snapshot with the\n * same major versions. This applies to both rebuilding the program against new\n * FFmpeg versions or to replacing the dynamic FFmpeg libraries that a program\n * links against.\n *\n * However, new public symbols may be added and new members may be appended to\n * public structs whose size is not part of public ABI (most public structs in\n * FFmpeg). New macros and enum values may be added. Behavior in undocumented\n * situations may change slightly (and be documented). 
All those are accompanied\n * by an entry in doc/APIchanges and incrementing either the minor or micro\n * version number.\n */\n\n/**\n * @defgroup lavu Common utility functions\n *\n * @brief\n * libavutil contains the code shared across all the other FFmpeg\n * libraries\n *\n * @note In order to use the functions provided by avutil you must include\n * the specific header.\n *\n * @{\n *\n * @defgroup lavu_crypto Crypto and Hashing\n *\n * @{\n * @}\n *\n * @defgroup lavu_math Maths\n * @{\n *\n * @}\n *\n * @defgroup lavu_string String Manipulation\n *\n * @{\n *\n * @}\n *\n * @defgroup lavu_mem Memory Management\n *\n * @{\n *\n * @}\n *\n * @defgroup lavu_data Data Structures\n * @{\n *\n * @}\n *\n * @defgroup lavu_audio Audio related\n *\n * @{\n *\n * @}\n *\n * @defgroup lavu_error Error Codes\n *\n * @{\n *\n * @}\n *\n * @defgroup lavu_log Logging Facility\n *\n * @{\n *\n * @}\n *\n * @defgroup lavu_misc Other\n *\n * @{\n *\n * @defgroup lavu_internal Internal\n *\n * Not exported functions, for internal usage only\n *\n * @{\n *\n * @}\n *\n * @defgroup preproc_misc Preprocessor String Macros\n *\n * @{\n *\n * @}\n *\n * @defgroup version_utils Library Version Macros\n *\n * @{\n *\n * @}\n */\n\n\n/**\n * @addtogroup lavu_ver\n * @{\n */\n\n/**\n * Return the LIBAVUTIL_VERSION_INT constant.\n */\nunsigned avutil_version(void);\n\n/**\n * Return the libavutil build-time configuration.\n */\nconst char *avutil_configuration(void);\n\n/**\n * Return the libavutil license.\n */\nconst char *avutil_license(void);\n\n/**\n * @}\n */\n\n/**\n * @addtogroup lavu_media Media Type\n * @brief Media Type\n */\n\nenum AVMediaType {\n    AVMEDIA_TYPE_UNKNOWN = -1,  ///< Usually treated as AVMEDIA_TYPE_DATA\n    AVMEDIA_TYPE_VIDEO,\n    AVMEDIA_TYPE_AUDIO,\n    AVMEDIA_TYPE_DATA,          ///< Opaque data information usually continuous\n    AVMEDIA_TYPE_SUBTITLE,\n    AVMEDIA_TYPE_ATTACHMENT,    ///< Opaque data information usually sparse\n    
AVMEDIA_TYPE_NB\n};\n\n/**\n * Return a string describing the media_type enum, NULL if media_type\n * is unknown.\n */\nconst char *av_get_media_type_string(enum AVMediaType media_type);\n\n/**\n * @defgroup lavu_const Constants\n * @{\n *\n * @defgroup lavu_enc Encoding specific\n *\n * @note those definition should move to avcodec\n * @{\n */\n\n#define FF_LAMBDA_SHIFT 7\n#define FF_LAMBDA_SCALE (1<<FF_LAMBDA_SHIFT)\n#define FF_QP2LAMBDA 118 ///< factor to convert from H.263 QP to lambda\n#define FF_LAMBDA_MAX (256*128-1)\n\n#define FF_QUALITY_SCALE FF_LAMBDA_SCALE //FIXME maybe remove\n\n/**\n * @}\n * @defgroup lavu_time Timestamp specific\n *\n * FFmpeg internal timebase and timestamp definitions\n *\n * @{\n */\n\n/**\n * @brief Undefined timestamp value\n *\n * Usually reported by demuxer that work on containers that do not provide\n * either pts or dts.\n */\n\n#define AV_NOPTS_VALUE          ((int64_t)UINT64_C(0x8000000000000000))\n\n/**\n * Internal time base represented as integer\n */\n\n#define AV_TIME_BASE            1000000\n\n/**\n * Internal time base represented as fractional value\n */\n\n#define AV_TIME_BASE_Q          (AVRational){1, AV_TIME_BASE}\n\n/**\n * @}\n * @}\n * @defgroup lavu_picture Image related\n *\n * AVPicture types, pixel formats and basic image planes manipulation.\n *\n * @{\n */\n\nenum AVPictureType {\n    AV_PICTURE_TYPE_NONE = 0, ///< Undefined\n    AV_PICTURE_TYPE_I,     ///< Intra\n    AV_PICTURE_TYPE_P,     ///< Predicted\n    AV_PICTURE_TYPE_B,     ///< Bi-dir predicted\n    AV_PICTURE_TYPE_S,     ///< S(GMC)-VOP MPEG4\n    AV_PICTURE_TYPE_SI,    ///< Switching Intra\n    AV_PICTURE_TYPE_SP,    ///< Switching Predicted\n    AV_PICTURE_TYPE_BI,    ///< BI type\n};\n\n/**\n * Return a single letter to describe the given picture type\n * pict_type.\n *\n * @param[in] pict_type the picture type @return a single character\n * representing the picture type, '?' 
if pict_type is unknown\n */\nchar av_get_picture_type_char(enum AVPictureType pict_type);\n\n/**\n * @}\n */\n\n#include \"common.h\"\n#include \"error.h\"\n#include \"rational.h\"\n#include \"version.h\"\n#include \"macros.h\"\n#include \"mathematics.h\"\n#include \"log.h\"\n#include \"pixfmt.h\"\n\n/**\n * Return x default pointer in case p is NULL.\n */\nstatic inline void *av_x_if_null(const void *p, const void *x)\n{\n    return (void *)(intptr_t)(p ? p : x);\n}\n\n/**\n * Compute the length of an integer list.\n *\n * @param elsize  size in bytes of each list element (only 1, 2, 4 or 8)\n * @param term    list terminator (usually 0 or -1)\n * @param list    pointer to the list\n * @return  length of the list, in elements, not counting the terminator\n */\nunsigned av_int_list_length_for_size(unsigned elsize,\n                                     const void *list, uint64_t term) av_pure;\n\n/**\n * Compute the length of an integer list.\n *\n * @param term  list terminator (usually 0 or -1)\n * @param list  pointer to the list\n * @return  length of the list, in elements, not counting the terminator\n */\n#define av_int_list_length(list, term) \\\n    av_int_list_length_for_size(sizeof(*(list)), list, term)\n\n/**\n * Open a file using a UTF-8 filename.\n * The API of this function matches POSIX fopen(), errors are returned through\n * errno.\n */\nFILE *av_fopen_utf8(const char *path, const char *mode);\n\n/**\n * Return the fractional representation of the internal time base.\n */\nAVRational av_get_time_base_q(void);\n\n/**\n * @}\n * @}\n */\n\n#endif /* AVUTIL_AVUTIL_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/base64.h",
    "content": "/*\n * Copyright (c) 2006 Ryan Martell. (rdm4@martellventures.com)\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_BASE64_H\n#define AVUTIL_BASE64_H\n\n#include <stdint.h>\n\n/**\n * @defgroup lavu_base64 Base64\n * @ingroup lavu_crypto\n * @{\n */\n\n\n/**\n * Decode a base64-encoded string.\n *\n * @param out      buffer for decoded data\n * @param in       null-terminated input string\n * @param out_size size in bytes of the out buffer, must be at\n *                 least 3/4 of the length of in\n * @return         number of bytes written, or a negative value in case of\n *                 invalid input\n */\nint av_base64_decode(uint8_t *out, const char *in, int out_size);\n\n/**\n * Encode data to base64 and null-terminate.\n *\n * @param out      buffer for encoded data\n * @param out_size size in bytes of the out buffer (including the\n *                 null terminator), must be at least AV_BASE64_SIZE(in_size)\n * @param in       input buffer containing the data to encode\n * @param in_size  size in bytes of the in buffer\n * @return         out or NULL in case of error\n */\nchar *av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size);\n\n/**\n * Calculate the output size needed to 
base64-encode x bytes to a\n * null-terminated string.\n */\n#define AV_BASE64_SIZE(x)  (((x)+2) / 3 * 4 + 1)\n\n /**\n  * @}\n  */\n\n#endif /* AVUTIL_BASE64_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/blowfish.h",
    "content": "/*\n * Blowfish algorithm\n * Copyright (c) 2012 Samuel Pitoiset\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_BLOWFISH_H\n#define AVUTIL_BLOWFISH_H\n\n#include <stdint.h>\n\n/**\n * @defgroup lavu_blowfish Blowfish\n * @ingroup lavu_crypto\n * @{\n */\n\n#define AV_BF_ROUNDS 16\n\ntypedef struct AVBlowfish {\n    uint32_t p[AV_BF_ROUNDS + 2];\n    uint32_t s[4][256];\n} AVBlowfish;\n\n/**\n * Initialize an AVBlowfish context.\n *\n * @param ctx an AVBlowfish context\n * @param key a key\n * @param key_len length of the key\n */\nvoid av_blowfish_init(struct AVBlowfish *ctx, const uint8_t *key, int key_len);\n\n/**\n * Encrypt or decrypt a buffer using a previously initialized context.\n *\n * @param ctx an AVBlowfish context\n * @param xl left four bytes halves of input to be encrypted\n * @param xr right four bytes halves of input to be encrypted\n * @param decrypt 0 for encryption, 1 for decryption\n */\nvoid av_blowfish_crypt_ecb(struct AVBlowfish *ctx, uint32_t *xl, uint32_t *xr,\n                           int decrypt);\n\n/**\n * Encrypt or decrypt a buffer using a previously initialized context.\n *\n * @param ctx an AVBlowfish context\n * @param dst destination array, can be equal to src\n * @param src 
source array, can be equal to dst\n * @param count number of 8 byte blocks\n * @param iv initialization vector for CBC mode, if NULL ECB will be used\n * @param decrypt 0 for encryption, 1 for decryption\n */\nvoid av_blowfish_crypt(struct AVBlowfish *ctx, uint8_t *dst, const uint8_t *src,\n                       int count, uint8_t *iv, int decrypt);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_BLOWFISH_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/bprint.h",
    "content": "/*\n * Copyright (c) 2012 Nicolas George\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_BPRINT_H\n#define AVUTIL_BPRINT_H\n\n#include <stdarg.h>\n\n#include \"attributes.h\"\n#include \"avstring.h\"\n\n/**\n * Define a structure with extra padding to a fixed size\n * This helps ensuring binary compatibility with future versions.\n */\n\n#define FF_PAD_STRUCTURE(name, size, ...) \\\nstruct ff_pad_helper_##name { __VA_ARGS__ }; \\\ntypedef struct name { \\\n    __VA_ARGS__ \\\n    char reserved_padding[size - sizeof(struct ff_pad_helper_##name)]; \\\n} name;\n\n/**\n * Buffer to print data progressively\n *\n * The string buffer grows as necessary and is always 0-terminated.\n * The content of the string is never accessed, and thus is\n * encoding-agnostic and can even hold binary data.\n *\n * Small buffers are kept in the structure itself, and thus require no\n * memory allocation at all (unless the contents of the buffer is needed\n * after the structure goes out of scope). 
This is almost as lightweight as\n * declaring a local \"char buf[512]\".\n *\n * The length of the string can go beyond the allocated size: the buffer is\n * then truncated, but the functions still keep account of the actual total\n * length.\n *\n * In other words, buf->len can be greater than buf->size and records the\n * total length of what would have been written to the buffer if there had been\n * enough memory.\n *\n * Append operations do not need to be tested for failure: if a memory\n * allocation fails, data stop being appended to the buffer, but the length\n * is still updated. This situation can be tested with\n * av_bprint_is_complete().\n *\n * The size_max field determines several possible behaviours:\n *\n * size_max = -1 (= UINT_MAX) or any large value will let the buffer be\n * reallocated as necessary, with an amortized linear cost.\n *\n * size_max = 0 prevents writing anything to the buffer: only the total\n * length is computed. The write operations can then possibly be repeated in\n * a buffer with exactly the necessary size\n * (using size_init = size_max = len + 1).\n *\n * size_max = 1 is automatically replaced by the exact size available in the\n * structure itself, thus ensuring no dynamic memory allocation. 
The\n * internal buffer is large enough to hold a reasonable paragraph of text,\n * such as the current paragraph.\n */\n\nFF_PAD_STRUCTURE(AVBPrint, 1024,\n    char *str;         /**< string so far */\n    unsigned len;      /**< length so far */\n    unsigned size;     /**< allocated memory */\n    unsigned size_max; /**< maximum allocated memory */\n    char reserved_internal_buffer[1];\n)\n\n/**\n * Convenience macros for special values for av_bprint_init() size_max\n * parameter.\n */\n#define AV_BPRINT_SIZE_UNLIMITED  ((unsigned)-1)\n#define AV_BPRINT_SIZE_AUTOMATIC  1\n#define AV_BPRINT_SIZE_COUNT_ONLY 0\n\n/**\n * Init a print buffer.\n *\n * @param buf        buffer to init\n * @param size_init  initial size (including the final 0)\n * @param size_max   maximum size;\n *                   0 means do not write anything, just count the length;\n *                   1 is replaced by the maximum value for automatic storage;\n *                   any large value means that the internal buffer will be\n *                   reallocated as needed up to that limit; -1 is converted to\n *                   UINT_MAX, the largest limit possible.\n *                   Check also AV_BPRINT_SIZE_* macros.\n */\nvoid av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max);\n\n/**\n * Init a print buffer using a pre-existing buffer.\n *\n * The buffer will not be reallocated.\n *\n * @param buf     buffer structure to init\n * @param buffer  byte buffer to use for the string data\n * @param size    size of buffer\n */\nvoid av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size);\n\n/**\n * Append a formatted string to a print buffer.\n */\nvoid av_bprintf(AVBPrint *buf, const char *fmt, ...) 
av_printf_format(2, 3);\n\n/**\n * Append a formatted string to a print buffer.\n */\nvoid av_vbprintf(AVBPrint *buf, const char *fmt, va_list vl_arg);\n\n/**\n * Append char c n times to a print buffer.\n */\nvoid av_bprint_chars(AVBPrint *buf, char c, unsigned n);\n\n/**\n * Append data to a print buffer.\n *\n * @param buf  bprint buffer to use\n * @param data pointer to data\n * @param size size of data\n */\nvoid av_bprint_append_data(AVBPrint *buf, const char *data, unsigned size);\n\nstruct tm;\n/**\n * Append a formatted date and time to a print buffer.\n *\n * @param buf  bprint buffer to use\n * @param fmt  date and time format string, see strftime()\n * @param tm   broken-down time structure to translate\n *\n * @note due to poor design of the standard strftime function, it may\n * produce poor results if the format string expands to a very long text and\n * the bprint buffer is near the limit stated by the size_max option.\n */\nvoid av_bprint_strftime(AVBPrint *buf, const char *fmt, const struct tm *tm);\n\n/**\n * Allocate bytes in the buffer for external use.\n *\n * @param[in]  buf          buffer structure\n * @param[in]  size         required size\n * @param[out] mem          pointer to the memory area\n * @param[out] actual_size  size of the memory area after allocation;\n *                          can be larger or smaller than size\n */\nvoid av_bprint_get_buffer(AVBPrint *buf, unsigned size,\n                          unsigned char **mem, unsigned *actual_size);\n\n/**\n * Reset the string to \"\" but keep internal allocated data.\n */\nvoid av_bprint_clear(AVBPrint *buf);\n\n/**\n * Test if the print buffer is complete (not truncated).\n *\n * It may have been truncated due to a memory allocation failure\n * or the size_max limit (compare size and size_max if necessary).\n */\nstatic inline int av_bprint_is_complete(const AVBPrint *buf)\n{\n    return buf->len < buf->size;\n}\n\n/**\n * Finalize a print buffer.\n *\n * The print buffer can no 
longer be used afterwards,\n * but the len and size fields are still valid.\n *\n * @arg[out] ret_str  if not NULL, used to return a permanent copy of the\n *                    buffer contents, or NULL if memory allocation fails;\n *                    if NULL, the buffer is discarded and freed\n * @return  0 for success or error code (probably AVERROR(ENOMEM))\n */\nint av_bprint_finalize(AVBPrint *buf, char **ret_str);\n\n/**\n * Escape the content in src and append it to dstbuf.\n *\n * @param dstbuf        already inited destination bprint buffer\n * @param src           string containing the text to escape\n * @param special_chars string containing the special characters which\n *                      need to be escaped, can be NULL\n * @param mode          escape mode to employ, see AV_ESCAPE_MODE_* macros.\n *                      Any unknown value for mode will be considered equivalent to\n *                      AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without\n *                      notice.\n * @param flags         flags which control how to escape, see AV_ESCAPE_FLAG_* macros\n */\nvoid av_bprint_escape(AVBPrint *dstbuf, const char *src, const char *special_chars,\n                      enum AVEscapeMode mode, int flags);\n\n#endif /* AVUTIL_BPRINT_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/bswap.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * byte swapping routines\n */\n\n#ifndef AVUTIL_BSWAP_H\n#define AVUTIL_BSWAP_H\n\n#include <stdint.h>\n#include \"libavutil/avconfig.h\"\n#include \"attributes.h\"\n\n#ifdef HAVE_AV_CONFIG_H\n\n#include \"config.h\"\n\n#if   ARCH_AARCH64\n#   include \"aarch64/bswap.h\"\n#elif ARCH_ARM\n#   include \"arm/bswap.h\"\n#elif ARCH_AVR32\n#   include \"avr32/bswap.h\"\n#elif ARCH_SH4\n#   include \"sh4/bswap.h\"\n#elif ARCH_X86\n#   include \"x86/bswap.h\"\n#endif\n\n#endif /* HAVE_AV_CONFIG_H */\n\n#define AV_BSWAP16C(x) (((x) << 8 & 0xff00)  | ((x) >> 8 & 0x00ff))\n#define AV_BSWAP32C(x) (AV_BSWAP16C(x) << 16 | AV_BSWAP16C((x) >> 16))\n#define AV_BSWAP64C(x) (AV_BSWAP32C(x) << 32 | AV_BSWAP32C((x) >> 32))\n\n#define AV_BSWAPC(s, x) AV_BSWAP##s##C(x)\n\n#ifndef av_bswap16\nstatic av_always_inline av_const uint16_t av_bswap16(uint16_t x)\n{\n    x= (x>>8) | (x<<8);\n    return x;\n}\n#endif\n\n#ifndef av_bswap32\nstatic av_always_inline av_const uint32_t av_bswap32(uint32_t x)\n{\n    return AV_BSWAP32C(x);\n}\n#endif\n\n#ifndef av_bswap64\nstatic inline uint64_t av_const av_bswap64(uint64_t x)\n{\n    
return (uint64_t)av_bswap32(x) << 32 | av_bswap32(x >> 32);\n}\n#endif\n\n// be2ne ... big-endian to native-endian\n// le2ne ... little-endian to native-endian\n\n#if AV_HAVE_BIGENDIAN\n#define av_be2ne16(x) (x)\n#define av_be2ne32(x) (x)\n#define av_be2ne64(x) (x)\n#define av_le2ne16(x) av_bswap16(x)\n#define av_le2ne32(x) av_bswap32(x)\n#define av_le2ne64(x) av_bswap64(x)\n#define AV_BE2NEC(s, x) (x)\n#define AV_LE2NEC(s, x) AV_BSWAPC(s, x)\n#else\n#define av_be2ne16(x) av_bswap16(x)\n#define av_be2ne32(x) av_bswap32(x)\n#define av_be2ne64(x) av_bswap64(x)\n#define av_le2ne16(x) (x)\n#define av_le2ne32(x) (x)\n#define av_le2ne64(x) (x)\n#define AV_BE2NEC(s, x) AV_BSWAPC(s, x)\n#define AV_LE2NEC(s, x) (x)\n#endif\n\n#define AV_BE2NE16C(x) AV_BE2NEC(16, x)\n#define AV_BE2NE32C(x) AV_BE2NEC(32, x)\n#define AV_BE2NE64C(x) AV_BE2NEC(64, x)\n#define AV_LE2NE16C(x) AV_LE2NEC(16, x)\n#define AV_LE2NE32C(x) AV_LE2NEC(32, x)\n#define AV_LE2NE64C(x) AV_LE2NEC(64, x)\n\n#endif /* AVUTIL_BSWAP_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/buffer.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * @ingroup lavu_buffer\n * refcounted data buffer API\n */\n\n#ifndef AVUTIL_BUFFER_H\n#define AVUTIL_BUFFER_H\n\n#include <stdint.h>\n\n/**\n * @defgroup lavu_buffer AVBuffer\n * @ingroup lavu_data\n *\n * @{\n * AVBuffer is an API for reference-counted data buffers.\n *\n * There are two core objects in this API -- AVBuffer and AVBufferRef. AVBuffer\n * represents the data buffer itself; it is opaque and not meant to be accessed\n * by the caller directly, but only through AVBufferRef. However, the caller may\n * e.g. compare two AVBuffer pointers to check whether two different references\n * are describing the same data buffer. AVBufferRef represents a single\n * reference to an AVBuffer and it is the object that may be manipulated by the\n * caller directly.\n *\n * There are two functions provided for creating a new AVBuffer with a single\n * reference -- av_buffer_alloc() to just allocate a new buffer, and\n * av_buffer_create() to wrap an existing array in an AVBuffer. 
From an existing\n * reference, additional references may be created with av_buffer_ref().\n * Use av_buffer_unref() to free a reference (this will automatically free the\n * data once all the references are freed).\n *\n * The convention throughout this API and the rest of FFmpeg is such that the\n * buffer is considered writable if there exists only one reference to it (and\n * it has not been marked as read-only). The av_buffer_is_writable() function is\n * provided to check whether this is true and av_buffer_make_writable() will\n * automatically create a new writable buffer when necessary.\n * Of course nothing prevents the calling code from violating this convention,\n * however that is safe only when all the existing references are under its\n * control.\n *\n * @note Referencing and unreferencing the buffers is thread-safe and thus\n * may be done from multiple threads simultaneously without any need for\n * additional locking.\n *\n * @note Two different references to the same buffer can point to different\n * parts of the buffer (i.e. their AVBufferRef.data will not be equal).\n */\n\n/**\n * A reference counted buffer type. It is opaque and is meant to be used through\n * references (AVBufferRef).\n */\ntypedef struct AVBuffer AVBuffer;\n\n/**\n * A reference to a data buffer.\n *\n * The size of this struct is not a part of the public ABI and it is not meant\n * to be allocated directly.\n */\ntypedef struct AVBufferRef {\n    AVBuffer *buffer;\n\n    /**\n     * The data buffer. 
It is considered writable if and only if\n     * this is the only reference to the buffer, in which case\n     * av_buffer_is_writable() returns 1.\n     */\n    uint8_t *data;\n    /**\n     * Size of data in bytes.\n     */\n    int      size;\n} AVBufferRef;\n\n/**\n * Allocate an AVBuffer of the given size using av_malloc().\n *\n * @return an AVBufferRef of given size or NULL when out of memory\n */\nAVBufferRef *av_buffer_alloc(int size);\n\n/**\n * Same as av_buffer_alloc(), except the returned buffer will be initialized\n * to zero.\n */\nAVBufferRef *av_buffer_allocz(int size);\n\n/**\n * Always treat the buffer as read-only, even when it has only one\n * reference.\n */\n#define AV_BUFFER_FLAG_READONLY (1 << 0)\n\n/**\n * Create an AVBuffer from an existing array.\n *\n * If this function is successful, data is owned by the AVBuffer. The caller may\n * only access data through the returned AVBufferRef and references derived from\n * it.\n * If this function fails, data is left untouched.\n * @param data   data array\n * @param size   size of data in bytes\n * @param free   a callback for freeing this buffer's data\n * @param opaque parameter to be got for processing or passed to free\n * @param flags  a combination of AV_BUFFER_FLAG_*\n *\n * @return an AVBufferRef referring to data on success, NULL on failure.\n */\nAVBufferRef *av_buffer_create(uint8_t *data, int size,\n                              void (*free)(void *opaque, uint8_t *data),\n                              void *opaque, int flags);\n\n/**\n * Default free callback, which calls av_free() on the buffer data.\n * This function is meant to be passed to av_buffer_create(), not called\n * directly.\n */\nvoid av_buffer_default_free(void *opaque, uint8_t *data);\n\n/**\n * Create a new reference to an AVBuffer.\n *\n * @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on\n * failure.\n */\nAVBufferRef *av_buffer_ref(AVBufferRef *buf);\n\n/**\n * Free a given reference and 
automatically free the buffer if there are no more\n * references to it.\n *\n * @param buf the reference to be freed. The pointer is set to NULL on return.\n */\nvoid av_buffer_unref(AVBufferRef **buf);\n\n/**\n * @return 1 if the caller may write to the data referred to by buf (which is\n * true if and only if buf is the only reference to the underlying AVBuffer).\n * Return 0 otherwise.\n * A positive answer is valid until av_buffer_ref() is called on buf.\n */\nint av_buffer_is_writable(const AVBufferRef *buf);\n\n/**\n * @return the opaque parameter set by av_buffer_create.\n */\nvoid *av_buffer_get_opaque(const AVBufferRef *buf);\n\nint av_buffer_get_ref_count(const AVBufferRef *buf);\n\n/**\n * Create a writable reference from a given buffer reference, avoiding data copy\n * if possible.\n *\n * @param buf buffer reference to make writable. On success, buf is either left\n *            untouched, or it is unreferenced and a new writable AVBufferRef is\n *            written in its place. On failure, buf is left untouched.\n * @return 0 on success, a negative AVERROR on failure.\n */\nint av_buffer_make_writable(AVBufferRef **buf);\n\n/**\n * Reallocate a given buffer.\n *\n * @param buf  a buffer reference to reallocate. On success, buf will be\n *             unreferenced and a new reference with the required size will be\n *             written in its place. On failure buf will be left untouched. *buf\n *             may be NULL, then a new buffer is allocated.\n * @param size required new buffer size.\n * @return 0 on success, a negative AVERROR on failure.\n *\n * @note the buffer is actually reallocated with av_realloc() only if it was\n * initially allocated through av_buffer_realloc(NULL) and there is only one\n * reference to it (i.e. the one passed to this function). 
In all other cases\n * a new buffer is allocated and the data is copied.\n */\nint av_buffer_realloc(AVBufferRef **buf, int size);\n\n/**\n * @}\n */\n\n/**\n * @defgroup lavu_bufferpool AVBufferPool\n * @ingroup lavu_data\n *\n * @{\n * AVBufferPool is an API for a lock-free thread-safe pool of AVBuffers.\n *\n * Frequently allocating and freeing large buffers may be slow. AVBufferPool is\n * meant to solve this in cases when the caller needs a set of buffers of the\n * same size (the most obvious use case being buffers for raw video or audio\n * frames).\n *\n * At the beginning, the user must call av_buffer_pool_init() to create the\n * buffer pool. Then whenever a buffer is needed, call av_buffer_pool_get() to\n * get a reference to a new buffer, similar to av_buffer_alloc(). This new\n * reference works in all aspects the same way as the one created by\n * av_buffer_alloc(). However, when the last reference to this buffer is\n * unreferenced, it is returned to the pool instead of being freed and will be\n * reused for subsequent av_buffer_pool_get() calls.\n *\n * When the caller is done with the pool and no longer needs to allocate any new\n * buffers, av_buffer_pool_uninit() must be called to mark the pool as freeable.\n * Once all the buffers are released, it will automatically be freed.\n *\n * Allocating and releasing buffers with this API is thread-safe as long as\n * either the default alloc callback is used, or the user-supplied one is\n * thread-safe.\n */\n\n/**\n * The buffer pool. This structure is opaque and not meant to be accessed\n * directly. It is allocated with av_buffer_pool_init() and freed with\n * av_buffer_pool_uninit().\n */\ntypedef struct AVBufferPool AVBufferPool;\n\n/**\n * Allocate and initialize a buffer pool.\n *\n * @param size size of each buffer in this pool\n * @param alloc a function that will be used to allocate new buffers when the\n * pool is empty. 
May be NULL, then the default allocator will be used\n * (av_buffer_alloc()).\n * @return newly created buffer pool on success, NULL on error.\n */\nAVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size));\n\n/**\n * Mark the pool as being available for freeing. It will actually be freed only\n * once all the allocated buffers associated with the pool are released. Thus it\n * is safe to call this function while some of the allocated buffers are still\n * in use.\n *\n * @param pool pointer to the pool to be freed. It will be set to NULL.\n * @see av_buffer_pool_can_uninit()\n */\nvoid av_buffer_pool_uninit(AVBufferPool **pool);\n\n/**\n * Allocate a new AVBuffer, reusing an old buffer from the pool when available.\n * This function may be called simultaneously from multiple threads.\n *\n * @return a reference to the new buffer on success, NULL on error.\n */\nAVBufferRef *av_buffer_pool_get(AVBufferPool *pool);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_BUFFER_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/camellia.h",
    "content": "/*\n * An implementation of the CAMELLIA algorithm as mentioned in RFC3713\n * Copyright (c) 2014 Supraja Meedinti\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_CAMELLIA_H\n#define AVUTIL_CAMELLIA_H\n\n#include <stdint.h>\n\n\n/**\n  * @file\n  * @brief Public header for libavutil CAMELLIA algorithm\n  * @defgroup lavu_camellia CAMELLIA\n  * @ingroup lavu_crypto\n  * @{\n  */\n\nextern const int av_camellia_size;\n\nstruct AVCAMELLIA;\n\n/**\n  * Allocate an AVCAMELLIA context\n  * To free the struct: av_free(ptr)\n  */\nstruct AVCAMELLIA *av_camellia_alloc(void);\n\n/**\n  * Initialize an AVCAMELLIA context.\n  *\n  * @param ctx an AVCAMELLIA context\n  * @param key a key of 16, 24, 32 bytes used for encryption/decryption\n  * @param key_bits number of keybits: possible are 128, 192, 256\n */\nint av_camellia_init(struct AVCAMELLIA *ctx, const uint8_t *key, int key_bits);\n\n/**\n  * Encrypt or decrypt a buffer using a previously initialized context\n  *\n  * @param ctx an AVCAMELLIA context\n  * @param dst destination array, can be equal to src\n  * @param src source array, can be equal to dst\n  * @param count number of 16 byte blocks\n  * @paran iv initialization vector for CBC mode, NULL for ECB mode\n  * 
@param decrypt 0 for encryption, 1 for decryption\n */\nvoid av_camellia_crypt(struct AVCAMELLIA *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t* iv, int decrypt);\n\n/**\n * @}\n */\n#endif /* AVUTIL_CAMELLIA_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/cast5.h",
    "content": "/*\n * An implementation of the CAST128 algorithm as mentioned in RFC2144\n * Copyright (c) 2014 Supraja Meedinti\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_CAST5_H\n#define AVUTIL_CAST5_H\n\n#include <stdint.h>\n\n\n/**\n  * @file\n  * @brief Public header for libavutil CAST5 algorithm\n  * @defgroup lavu_cast5 CAST5\n  * @ingroup lavu_crypto\n  * @{\n  */\n\nextern const int av_cast5_size;\n\nstruct AVCAST5;\n\n/**\n  * Allocate an AVCAST5 context\n  * To free the struct: av_free(ptr)\n  */\nstruct AVCAST5 *av_cast5_alloc(void);\n/**\n  * Initialize an AVCAST5 context.\n  *\n  * @param ctx an AVCAST5 context\n  * @param key a key of 5,6,...16 bytes used for encryption/decryption\n  * @param key_bits number of keybits: possible are 40,48,...,128\n */\nint av_cast5_init(struct AVCAST5 *ctx, const uint8_t *key, int key_bits);\n\n/**\n  * Encrypt or decrypt a buffer using a previously initialized context, ECB mode only\n  *\n  * @param ctx an AVCAST5 context\n  * @param dst destination array, can be equal to src\n  * @param src source array, can be equal to dst\n  * @param count number of 8 byte blocks\n  * @param decrypt 0 for encryption, 1 for decryption\n */\nvoid av_cast5_crypt(struct AVCAST5 *ctx, uint8_t 
*dst, const uint8_t *src, int count, int decrypt);\n\n/**\n  * Encrypt or decrypt a buffer using a previously initialized context\n  *\n  * @param ctx an AVCAST5 context\n  * @param dst destination array, can be equal to src\n  * @param src source array, can be equal to dst\n  * @param count number of 8 byte blocks\n  * @param iv initialization vector for CBC mode, NULL for ECB mode\n  * @param decrypt 0 for encryption, 1 for decryption\n */\nvoid av_cast5_crypt2(struct AVCAST5 *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt);\n/**\n * @}\n */\n#endif /* AVUTIL_CAST5_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/channel_layout.h",
    "content": "/*\n * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n * Copyright (c) 2008 Peter Ross\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_CHANNEL_LAYOUT_H\n#define AVUTIL_CHANNEL_LAYOUT_H\n\n#include <stdint.h>\n\n/**\n * @file\n * audio channel layout utility functions\n */\n\n/**\n * @addtogroup lavu_audio\n * @{\n */\n\n/**\n * @defgroup channel_masks Audio channel masks\n *\n * A channel layout is a 64-bits integer with a bit set for every channel.\n * The number of bits set must be equal to the number of channels.\n * The value 0 means that the channel layout is not known.\n * @note this data structure is not powerful enough to handle channels\n * combinations that have the same channel multiple times, such as\n * dual-mono.\n *\n * @{\n */\n#define AV_CH_FRONT_LEFT             0x00000001\n#define AV_CH_FRONT_RIGHT            0x00000002\n#define AV_CH_FRONT_CENTER           0x00000004\n#define AV_CH_LOW_FREQUENCY          0x00000008\n#define AV_CH_BACK_LEFT              0x00000010\n#define AV_CH_BACK_RIGHT             0x00000020\n#define AV_CH_FRONT_LEFT_OF_CENTER   0x00000040\n#define AV_CH_FRONT_RIGHT_OF_CENTER  0x00000080\n#define AV_CH_BACK_CENTER            0x00000100\n#define AV_CH_SIDE_LEFT    
          0x00000200\n#define AV_CH_SIDE_RIGHT             0x00000400\n#define AV_CH_TOP_CENTER             0x00000800\n#define AV_CH_TOP_FRONT_LEFT         0x00001000\n#define AV_CH_TOP_FRONT_CENTER       0x00002000\n#define AV_CH_TOP_FRONT_RIGHT        0x00004000\n#define AV_CH_TOP_BACK_LEFT          0x00008000\n#define AV_CH_TOP_BACK_CENTER        0x00010000\n#define AV_CH_TOP_BACK_RIGHT         0x00020000\n#define AV_CH_STEREO_LEFT            0x20000000  ///< Stereo downmix.\n#define AV_CH_STEREO_RIGHT           0x40000000  ///< See AV_CH_STEREO_LEFT.\n#define AV_CH_WIDE_LEFT              0x0000000080000000ULL\n#define AV_CH_WIDE_RIGHT             0x0000000100000000ULL\n#define AV_CH_SURROUND_DIRECT_LEFT   0x0000000200000000ULL\n#define AV_CH_SURROUND_DIRECT_RIGHT  0x0000000400000000ULL\n#define AV_CH_LOW_FREQUENCY_2        0x0000000800000000ULL\n\n/** Channel mask value used for AVCodecContext.request_channel_layout\n    to indicate that the user requests the channel order of the decoder output\n    to be the native codec channel order. 
*/\n#define AV_CH_LAYOUT_NATIVE          0x8000000000000000ULL\n\n/**\n * @}\n * @defgroup channel_mask_c Audio channel layouts\n * @{\n * */\n#define AV_CH_LAYOUT_MONO              (AV_CH_FRONT_CENTER)\n#define AV_CH_LAYOUT_STEREO            (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT)\n#define AV_CH_LAYOUT_2POINT1           (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY)\n#define AV_CH_LAYOUT_2_1               (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER)\n#define AV_CH_LAYOUT_SURROUND          (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER)\n#define AV_CH_LAYOUT_3POINT1           (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY)\n#define AV_CH_LAYOUT_4POINT0           (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER)\n#define AV_CH_LAYOUT_4POINT1           (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY)\n#define AV_CH_LAYOUT_2_2               (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)\n#define AV_CH_LAYOUT_QUAD              (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)\n#define AV_CH_LAYOUT_5POINT0           (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)\n#define AV_CH_LAYOUT_5POINT1           (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY)\n#define AV_CH_LAYOUT_5POINT0_BACK      (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)\n#define AV_CH_LAYOUT_5POINT1_BACK      (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY)\n#define AV_CH_LAYOUT_6POINT0           (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER)\n#define AV_CH_LAYOUT_6POINT0_FRONT     (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)\n#define AV_CH_LAYOUT_HEXAGONAL         (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER)\n#define AV_CH_LAYOUT_6POINT1           (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER)\n#define AV_CH_LAYOUT_6POINT1_BACK      (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER)\n#define AV_CH_LAYOUT_6POINT1_FRONT     (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY)\n#define AV_CH_LAYOUT_7POINT0           (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)\n#define AV_CH_LAYOUT_7POINT0_FRONT     
(AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)\n#define AV_CH_LAYOUT_7POINT1           (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)\n#define AV_CH_LAYOUT_7POINT1_WIDE      (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)\n#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)\n#define AV_CH_LAYOUT_OCTAGONAL         (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT)\n#define AV_CH_LAYOUT_STEREO_DOWNMIX    (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT)\n\nenum AVMatrixEncoding {\n    AV_MATRIX_ENCODING_NONE,\n    AV_MATRIX_ENCODING_DOLBY,\n    AV_MATRIX_ENCODING_DPLII,\n    AV_MATRIX_ENCODING_DPLIIX,\n    AV_MATRIX_ENCODING_DPLIIZ,\n    AV_MATRIX_ENCODING_DOLBYEX,\n    AV_MATRIX_ENCODING_DOLBYHEADPHONE,\n    AV_MATRIX_ENCODING_NB\n};\n\n/**\n * Return a channel layout id that matches name, or 0 if no match is found.\n *\n * name can be one or several of the following notations,\n * separated by '+' or '|':\n * - the name of an usual channel layout (mono, stereo, 4.0, quad, 5.0,\n *   5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix);\n * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC,\n *   SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR);\n * - a number of channels, in decimal, optionally followed by 'c', yielding\n *   the default channel layout for that number of channels (@see\n *   av_get_default_channel_layout);\n * - a channel layout mask, in hexadecimal starting with \"0x\" (see the\n *   AV_CH_* macros).\n *\n * @warning Starting from the next major bump the trailing character\n * 'c' to specify a number of channels will be required, while a\n * channel layout mask could also be specified as a decimal number\n * (if and only if not followed by \"c\").\n *\n * Example: \"stereo+FC\" = \"2c+FC\" = \"2c+1c\" = \"0x7\"\n */\nuint64_t av_get_channel_layout(const char *name);\n\n/**\n * 
Return a description of a channel layout.\n * If nb_channels is <= 0, it is guessed from the channel_layout.\n *\n * @param buf put here the string containing the channel layout\n * @param buf_size size in bytes of the buffer\n */\nvoid av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout);\n\nstruct AVBPrint;\n/**\n * Append a description of a channel layout to a bprint buffer.\n */\nvoid av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout);\n\n/**\n * Return the number of channels in the channel layout.\n */\nint av_get_channel_layout_nb_channels(uint64_t channel_layout);\n\n/**\n * Return default channel layout for a given number of channels.\n */\nint64_t av_get_default_channel_layout(int nb_channels);\n\n/**\n * Get the index of a channel in channel_layout.\n *\n * @param channel a channel layout describing exactly one channel which must be\n *                present in channel_layout.\n *\n * @return index of channel in channel_layout on success, a negative AVERROR\n *         on error.\n */\nint av_get_channel_layout_channel_index(uint64_t channel_layout,\n                                        uint64_t channel);\n\n/**\n * Get the channel with the given index in channel_layout.\n */\nuint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index);\n\n/**\n * Get the name of a given channel.\n *\n * @return channel name on success, NULL on error.\n */\nconst char *av_get_channel_name(uint64_t channel);\n\n/**\n * Get the description of a given channel.\n *\n * @param channel  a channel layout with a single channel\n * @return  channel description on success, NULL on error\n */\nconst char *av_get_channel_description(uint64_t channel);\n\n/**\n * Get the value and name of a standard channel layout.\n *\n * @param[in]  index   index in an internal list, starting at 0\n * @param[out] layout  channel layout mask\n * @param[out] name    name of the layout\n * @return  0  
if the layout exists,\n *          <0 if index is beyond the limits\n */\nint av_get_standard_channel_layout(unsigned index, uint64_t *layout,\n                                   const char **name);\n\n/**\n * @}\n * @}\n */\n\n#endif /* AVUTIL_CHANNEL_LAYOUT_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/common.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * common internal and external API header\n */\n\n#ifndef AVUTIL_COMMON_H\n#define AVUTIL_COMMON_H\n\n#if defined(__cplusplus) && !defined(__STDC_CONSTANT_MACROS) && !defined(UINT64_C)\n#error missing -D__STDC_CONSTANT_MACROS / #define __STDC_CONSTANT_MACROS\n#endif\n\n#include <errno.h>\n#include <inttypes.h>\n#include <limits.h>\n#include <math.h>\n#include <stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n\n#include \"attributes.h\"\n#include \"version.h\"\n#include \"libavutil/avconfig.h\"\n\n#if AV_HAVE_BIGENDIAN\n#   define AV_NE(be, le) (be)\n#else\n#   define AV_NE(be, le) (le)\n#endif\n\n//rounded division & shift\n#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))\n/* assume b>0 */\n#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))\n/* assume a>0 and b>0 */\n#define FF_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? 
-((-(a)) >> (b)) \\\n                                                       : ((a) + (1<<(b)) - 1) >> (b))\n#define FFUDIV(a,b) (((a)>0 ?(a):(a)-(b)+1) / (b))\n#define FFUMOD(a,b) ((a)-(b)*FFUDIV(a,b))\n#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))\n#define FFSIGN(a) ((a) > 0 ? 1 : -1)\n\n#define FFMAX(a,b) ((a) > (b) ? (a) : (b))\n#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c)\n#define FFMIN(a,b) ((a) > (b) ? (b) : (a))\n#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c)\n\n#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0)\n#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))\n#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1))\n\n/* misc math functions */\n\n/**\n * Reverse the order of the bits of an 8-bits unsigned integer.\n */\n#if FF_API_AV_REVERSE\nextern attribute_deprecated const uint8_t av_reverse[256];\n#endif\n\n#ifdef HAVE_AV_CONFIG_H\n#   include \"config.h\"\n#   include \"intmath.h\"\n#endif\n\n/* Pull in unguarded fallback defines at the end of this file. */\n#include \"common.h\"\n\n#ifndef av_log2\nav_const int av_log2(unsigned v);\n#endif\n\n#ifndef av_log2_16bit\nav_const int av_log2_16bit(unsigned v);\n#endif\n\n/**\n * Clip a signed integer value into the amin-amax range.\n * @param a value to clip\n * @param amin minimum value of the clip range\n * @param amax maximum value of the clip range\n * @return clipped value\n */\nstatic av_always_inline av_const int av_clip_c(int a, int amin, int amax)\n{\n#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2\n    if (amin > amax) abort();\n#endif\n    if      (a < amin) return amin;\n    else if (a > amax) return amax;\n    else               return a;\n}\n\n/**\n * Clip a signed 64bit integer value into the amin-amax range.\n * @param a value to clip\n * @param amin minimum value of the clip range\n * @param amax maximum value of the clip range\n * @return clipped value\n */\nstatic av_always_inline av_const int64_t av_clip64_c(int64_t a, int64_t amin, int64_t 
amax)\n{\n#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2\n    if (amin > amax) abort();\n#endif\n    if      (a < amin) return amin;\n    else if (a > amax) return amax;\n    else               return a;\n}\n\n/**\n * Clip a signed integer value into the 0-255 range.\n * @param a value to clip\n * @return clipped value\n */\nstatic av_always_inline av_const uint8_t av_clip_uint8_c(int a)\n{\n    if (a&(~0xFF)) return (-a)>>31;\n    else           return a;\n}\n\n/**\n * Clip a signed integer value into the -128,127 range.\n * @param a value to clip\n * @return clipped value\n */\nstatic av_always_inline av_const int8_t av_clip_int8_c(int a)\n{\n    if ((a+0x80U) & ~0xFF) return (a>>31) ^ 0x7F;\n    else                  return a;\n}\n\n/**\n * Clip a signed integer value into the 0-65535 range.\n * @param a value to clip\n * @return clipped value\n */\nstatic av_always_inline av_const uint16_t av_clip_uint16_c(int a)\n{\n    if (a&(~0xFFFF)) return (-a)>>31;\n    else             return a;\n}\n\n/**\n * Clip a signed integer value into the -32768,32767 range.\n * @param a value to clip\n * @return clipped value\n */\nstatic av_always_inline av_const int16_t av_clip_int16_c(int a)\n{\n    if ((a+0x8000U) & ~0xFFFF) return (a>>31) ^ 0x7FFF;\n    else                      return a;\n}\n\n/**\n * Clip a signed 64-bit integer value into the -2147483648,2147483647 range.\n * @param a value to clip\n * @return clipped value\n */\nstatic av_always_inline av_const int32_t av_clipl_int32_c(int64_t a)\n{\n    if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (int32_t)((a>>63) ^ 0x7FFFFFFF);\n    else                                         return (int32_t)a;\n}\n\n/**\n * Clip a signed integer into the -(2^p),(2^p-1) range.\n * @param  a value to clip\n * @param  p bit position to clip at\n * @return clipped value\n */\nstatic av_always_inline av_const int av_clip_intp2_c(int a, int p)\n{\n    if ((a + (1 << p)) & ~((2 << p) - 1))\n        
return (a >> 31) ^ ((1 << p) - 1);\n    else\n        return a;\n}\n\n/**\n * Clip a signed integer to an unsigned power of two range.\n * @param  a value to clip\n * @param  p bit position to clip at\n * @return clipped value\n */\nstatic av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)\n{\n    if (a & ~((1<<p) - 1)) return -a >> 31 & ((1<<p) - 1);\n    else                   return  a;\n}\n\n/**\n * Add two signed 32-bit values with saturation.\n *\n * @param  a one value\n * @param  b another value\n * @return sum with signed saturation\n */\nstatic av_always_inline int av_sat_add32_c(int a, int b)\n{\n    return av_clipl_int32((int64_t)a + b);\n}\n\n/**\n * Add a doubled value to another value with saturation at both stages.\n *\n * @param  a first value\n * @param  b value doubled and added to a\n * @return sum with signed saturation\n */\nstatic av_always_inline int av_sat_dadd32_c(int a, int b)\n{\n    return av_sat_add32(a, av_sat_add32(b, b));\n}\n\n/**\n * Clip a float value into the amin-amax range.\n * @param a value to clip\n * @param amin minimum value of the clip range\n * @param amax maximum value of the clip range\n * @return clipped value\n */\nstatic av_always_inline av_const float av_clipf_c(float a, float amin, float amax)\n{\n#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2\n    if (amin > amax) abort();\n#endif\n    if      (a < amin) return amin;\n    else if (a > amax) return amax;\n    else               return a;\n}\n\n/**\n * Clip a double value into the amin-amax range.\n * @param a value to clip\n * @param amin minimum value of the clip range\n * @param amax maximum value of the clip range\n * @return clipped value\n */\nstatic av_always_inline av_const double av_clipd_c(double a, double amin, double amax)\n{\n#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2\n    if (amin > amax) abort();\n#endif\n    if      (a < amin) return amin;\n    else if (a > amax) return 
amax;\n    else               return a;\n}\n\n/** Compute ceil(log2(x)).\n * @param x value used to compute ceil(log2(x))\n * @return computed ceiling of log2(x)\n */\nstatic av_always_inline av_const int av_ceil_log2_c(int x)\n{\n    return av_log2((x - 1) << 1);\n}\n\n/**\n * Count number of bits set to one in x\n * @param x value to count bits of\n * @return the number of bits set to one in x\n */\nstatic av_always_inline av_const int av_popcount_c(uint32_t x)\n{\n    x -= (x >> 1) & 0x55555555;\n    x = (x & 0x33333333) + ((x >> 2) & 0x33333333);\n    x = (x + (x >> 4)) & 0x0F0F0F0F;\n    x += x >> 8;\n    return (x + (x >> 16)) & 0x3F;\n}\n\n/**\n * Count number of bits set to one in x\n * @param x value to count bits of\n * @return the number of bits set to one in x\n */\nstatic av_always_inline av_const int av_popcount64_c(uint64_t x)\n{\n    return av_popcount((uint32_t)x) + av_popcount((uint32_t)(x >> 32));\n}\n\n#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))\n#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))\n\n/**\n * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.\n *\n * @param val      Output value, must be an lvalue of type uint32_t.\n * @param GET_BYTE Expression reading one byte from the input.\n *                 Evaluated up to 7 times (4 for the currently\n *                 assigned Unicode range).  With a memory buffer\n *                 input, this could be *ptr++.\n * @param ERROR    Expression to be evaluated on invalid input,\n *                 typically a goto statement.\n *\n * @warning ERROR should not contain a loop control statement which\n * could interact with the internal while loop, and should force an\n * exit from the macro code (e.g. 
through a goto or a return) in order\n * to prevent undefined results.\n */\n#define GET_UTF8(val, GET_BYTE, ERROR)\\\n    val= GET_BYTE;\\\n    {\\\n        uint32_t top = (val & 128) >> 1;\\\n        if ((val & 0xc0) == 0x80 || val >= 0xFE)\\\n            ERROR\\\n        while (val & top) {\\\n            int tmp= GET_BYTE - 128;\\\n            if(tmp>>6)\\\n                ERROR\\\n            val= (val<<6) + tmp;\\\n            top <<= 5;\\\n        }\\\n        val &= (top << 1) - 1;\\\n    }\n\n/**\n * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form.\n *\n * @param val       Output value, must be an lvalue of type uint32_t.\n * @param GET_16BIT Expression returning two bytes of UTF-16 data converted\n *                  to native byte order.  Evaluated one or two times.\n * @param ERROR     Expression to be evaluated on invalid input,\n *                  typically a goto statement.\n */\n#define GET_UTF16(val, GET_16BIT, ERROR)\\\n    val = GET_16BIT;\\\n    {\\\n        unsigned int hi = val - 0xD800;\\\n        if (hi < 0x800) {\\\n            val = GET_16BIT - 0xDC00;\\\n            if (val > 0x3FFU || hi > 0x3FFU)\\\n                ERROR\\\n            val += (hi<<10) + 0x10000;\\\n        }\\\n    }\\\n\n/**\n * @def PUT_UTF8(val, tmp, PUT_BYTE)\n * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long).\n * @param val is an input-only argument and should be of type uint32_t. It holds\n * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If\n * val is given as a function it is executed only once.\n * @param tmp is a temporary variable and should be of type uint8_t. 
It\n * represents an intermediate value during conversion that is to be\n * output by PUT_BYTE.\n * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination.\n * It could be a function or a statement, and uses tmp as the input byte.\n * For example, PUT_BYTE could be \"*output++ = tmp;\" PUT_BYTE will be\n * executed up to 4 times for values in the valid UTF-8 range and up to\n * 7 times in the general case, depending on the length of the converted\n * Unicode character.\n */\n#define PUT_UTF8(val, tmp, PUT_BYTE)\\\n    {\\\n        int bytes, shift;\\\n        uint32_t in = val;\\\n        if (in < 0x80) {\\\n            tmp = in;\\\n            PUT_BYTE\\\n        } else {\\\n            bytes = (av_log2(in) + 4) / 5;\\\n            shift = (bytes - 1) * 6;\\\n            tmp = (256 - (256 >> bytes)) | (in >> shift);\\\n            PUT_BYTE\\\n            while (shift >= 6) {\\\n                shift -= 6;\\\n                tmp = 0x80 | ((in >> shift) & 0x3f);\\\n                PUT_BYTE\\\n            }\\\n        }\\\n    }\n\n/**\n * @def PUT_UTF16(val, tmp, PUT_16BIT)\n * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes).\n * @param val is an input-only argument and should be of type uint32_t. It holds\n * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If\n * val is given as a function it is executed only once.\n * @param tmp is a temporary variable and should be of type uint16_t. It\n * represents an intermediate value during conversion that is to be\n * output by PUT_16BIT.\n * @param PUT_16BIT writes the converted UTF-16 data to any proper destination\n * in desired endianness. It could be a function or a statement, and uses tmp\n * as the input byte.  
For example, PUT_BYTE could be \"*output++ = tmp;\"\n * PUT_BYTE will be executed 1 or 2 times depending on input character.\n */\n#define PUT_UTF16(val, tmp, PUT_16BIT)\\\n    {\\\n        uint32_t in = val;\\\n        if (in < 0x10000) {\\\n            tmp = in;\\\n            PUT_16BIT\\\n        } else {\\\n            tmp = 0xD800 | ((in - 0x10000) >> 10);\\\n            PUT_16BIT\\\n            tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\\\n            PUT_16BIT\\\n        }\\\n    }\\\n\n\n\n#include \"mem.h\"\n\n#ifdef HAVE_AV_CONFIG_H\n#    include \"internal.h\"\n#endif /* HAVE_AV_CONFIG_H */\n\n#endif /* AVUTIL_COMMON_H */\n\n/*\n * The following definitions are outside the multiple inclusion guard\n * to ensure they are immediately available in intmath.h.\n */\n\n#ifndef av_ceil_log2\n#   define av_ceil_log2     av_ceil_log2_c\n#endif\n#ifndef av_clip\n#   define av_clip          av_clip_c\n#endif\n#ifndef av_clip64\n#   define av_clip64        av_clip64_c\n#endif\n#ifndef av_clip_uint8\n#   define av_clip_uint8    av_clip_uint8_c\n#endif\n#ifndef av_clip_int8\n#   define av_clip_int8     av_clip_int8_c\n#endif\n#ifndef av_clip_uint16\n#   define av_clip_uint16   av_clip_uint16_c\n#endif\n#ifndef av_clip_int16\n#   define av_clip_int16    av_clip_int16_c\n#endif\n#ifndef av_clipl_int32\n#   define av_clipl_int32   av_clipl_int32_c\n#endif\n#ifndef av_clip_intp2\n#   define av_clip_intp2    av_clip_intp2_c\n#endif\n#ifndef av_clip_uintp2\n#   define av_clip_uintp2   av_clip_uintp2_c\n#endif\n#ifndef av_sat_add32\n#   define av_sat_add32     av_sat_add32_c\n#endif\n#ifndef av_sat_dadd32\n#   define av_sat_dadd32    av_sat_dadd32_c\n#endif\n#ifndef av_clipf\n#   define av_clipf         av_clipf_c\n#endif\n#ifndef av_clipd\n#   define av_clipd         av_clipd_c\n#endif\n#ifndef av_popcount\n#   define av_popcount      av_popcount_c\n#endif\n#ifndef av_popcount64\n#   define av_popcount64    av_popcount64_c\n#endif\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/cpu.h",
    "content": "/*\n * Copyright (c) 2000, 2001, 2002 Fabrice Bellard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_CPU_H\n#define AVUTIL_CPU_H\n\n#include \"attributes.h\"\n\n#define AV_CPU_FLAG_FORCE    0x80000000 /* force usage of selected flags (OR) */\n\n    /* lower 16 bits - CPU features */\n#define AV_CPU_FLAG_MMX          0x0001 ///< standard MMX\n#define AV_CPU_FLAG_MMXEXT       0x0002 ///< SSE integer functions or AMD MMX ext\n#define AV_CPU_FLAG_MMX2         0x0002 ///< SSE integer functions or AMD MMX ext\n#define AV_CPU_FLAG_3DNOW        0x0004 ///< AMD 3DNOW\n#define AV_CPU_FLAG_SSE          0x0008 ///< SSE functions\n#define AV_CPU_FLAG_SSE2         0x0010 ///< PIV SSE2 functions\n#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster\n                                        ///< than regular MMX/SSE (e.g. Core1)\n#define AV_CPU_FLAG_3DNOWEXT     0x0020 ///< AMD 3DNowExt\n#define AV_CPU_FLAG_SSE3         0x0040 ///< Prescott SSE3 functions\n#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster\n                                        ///< than regular MMX/SSE (e.g. 
Core1)\n#define AV_CPU_FLAG_SSSE3        0x0080 ///< Conroe SSSE3 functions\n#define AV_CPU_FLAG_ATOM     0x10000000 ///< Atom processor, some SSSE3 instructions are slower\n#define AV_CPU_FLAG_SSE4         0x0100 ///< Penryn SSE4.1 functions\n#define AV_CPU_FLAG_SSE42        0x0200 ///< Nehalem SSE4.2 functions\n#define AV_CPU_FLAG_AVX          0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used\n#define AV_CPU_FLAG_XOP          0x0400 ///< Bulldozer XOP functions\n#define AV_CPU_FLAG_FMA4         0x0800 ///< Bulldozer FMA4 functions\n// #if LIBAVUTIL_VERSION_MAJOR <52\n#define AV_CPU_FLAG_CMOV      0x1001000 ///< supports cmov instruction\n// #else\n// #define AV_CPU_FLAG_CMOV         0x1000 ///< supports cmov instruction\n// #endif\n#define AV_CPU_FLAG_AVX2         0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used\n#define AV_CPU_FLAG_FMA3        0x10000 ///< Haswell FMA3 functions\n#define AV_CPU_FLAG_BMI1        0x20000 ///< Bit Manipulation Instruction Set 1\n#define AV_CPU_FLAG_BMI2        0x40000 ///< Bit Manipulation Instruction Set 2\n\n#define AV_CPU_FLAG_ALTIVEC      0x0001 ///< standard\n\n#define AV_CPU_FLAG_ARMV5TE      (1 << 0)\n#define AV_CPU_FLAG_ARMV6        (1 << 1)\n#define AV_CPU_FLAG_ARMV6T2      (1 << 2)\n#define AV_CPU_FLAG_VFP          (1 << 3)\n#define AV_CPU_FLAG_VFPV3        (1 << 4)\n#define AV_CPU_FLAG_NEON         (1 << 5)\n#define AV_CPU_FLAG_ARMV8        (1 << 6)\n#define AV_CPU_FLAG_SETEND       (1 <<16)\n\n/**\n * Return the flags which specify extensions supported by the CPU.\n * The returned value is affected by av_force_cpu_flags() if that was used\n * before. 
So av_get_cpu_flags() can easily be used in a application to\n * detect the enabled cpu flags.\n */\nint av_get_cpu_flags(void);\n\n/**\n * Disables cpu detection and forces the specified flags.\n * -1 is a special case that disables forcing of specific flags.\n */\nvoid av_force_cpu_flags(int flags);\n\n/**\n * Set a mask on flags returned by av_get_cpu_flags().\n * This function is mainly useful for testing.\n * Please use av_force_cpu_flags() and av_get_cpu_flags() instead which are more flexible\n *\n * @warning this function is not thread safe.\n */\nattribute_deprecated void av_set_cpu_flags_mask(int mask);\n\n/**\n * Parse CPU flags from a string.\n *\n * The returned flags contain the specified flags as well as related unspecified flags.\n *\n * This function exists only for compatibility with libav.\n * Please use av_parse_cpu_caps() when possible.\n * @return a combination of AV_CPU_* flags, negative on error.\n */\nattribute_deprecated\nint av_parse_cpu_flags(const char *s);\n\n/**\n * Parse CPU caps from a string and update the given AV_CPU_* flags based on that.\n *\n * @return negative on error.\n */\nint av_parse_cpu_caps(unsigned *flags, const char *s);\n\n/**\n * @return the number of logical CPU cores present.\n */\nint av_cpu_count(void);\n\n#endif /* AVUTIL_CPU_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/crc.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_CRC_H\n#define AVUTIL_CRC_H\n\n#include <stdint.h>\n#include <stddef.h>\n#include \"attributes.h\"\n\n/**\n * @defgroup lavu_crc32 CRC32\n * @ingroup lavu_crypto\n * @{\n */\n\ntypedef uint32_t AVCRC;\n\ntypedef enum {\n    AV_CRC_8_ATM,\n    AV_CRC_16_ANSI,\n    AV_CRC_16_CCITT,\n    AV_CRC_32_IEEE,\n    AV_CRC_32_IEEE_LE,  /*< reversed bitorder version of AV_CRC_32_IEEE */\n    AV_CRC_16_ANSI_LE,  /*< reversed bitorder version of AV_CRC_16_ANSI */\n    AV_CRC_24_IEEE = 12,\n    AV_CRC_MAX,         /*< Not part of public API! Do not use outside libavutil. */\n}AVCRCId;\n\n/**\n * Initialize a CRC table.\n * @param ctx must be an array of size sizeof(AVCRC)*257 or sizeof(AVCRC)*1024\n * @param le If 1, the lowest bit represents the coefficient for the highest\n *           exponent of the corresponding polynomial (both for poly and\n *           actual CRC).\n *           If 0, you must swap the CRC parameter and the result of av_crc\n *           if you need the standard representation (can be simplified in\n *           most cases to e.g. 
bswap16):\n *           av_bswap32(crc << (32-bits))\n * @param bits number of bits for the CRC\n * @param poly generator polynomial without the x**bits coefficient, in the\n *             representation as specified by le\n * @param ctx_size size of ctx in bytes\n * @return <0 on failure\n */\nint av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size);\n\n/**\n * Get an initialized standard CRC table.\n * @param crc_id ID of a standard CRC\n * @return a pointer to the CRC table or NULL on failure\n */\nconst AVCRC *av_crc_get_table(AVCRCId crc_id);\n\n/**\n * Calculate the CRC of a block.\n * @param crc CRC of previous blocks if any or initial value for CRC\n * @return CRC updated with the data from the given block\n *\n * @see av_crc_init() \"le\" parameter\n */\nuint32_t av_crc(const AVCRC *ctx, uint32_t crc,\n                const uint8_t *buffer, size_t length) av_pure;\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_CRC_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/dict.h",
    "content": "/*\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * Public dictionary API.\n * @deprecated\n *  AVDictionary is provided for compatibility with libav. It is both in\n *  implementation as well as API inefficient. It does not scale and is\n *  extremely slow with large dictionaries.\n *  It is recommended that new code uses our tree container from tree.c/h\n *  where applicable, which uses AVL trees to achieve O(log n) performance.\n */\n\n#ifndef AVUTIL_DICT_H\n#define AVUTIL_DICT_H\n\n#include <stdint.h>\n\n#include \"version.h\"\n\n/**\n * @addtogroup lavu_dict AVDictionary\n * @ingroup lavu_data\n *\n * @brief Simple key:value store\n *\n * @{\n * Dictionaries are used for storing key:value pairs. To create\n * an AVDictionary, simply pass an address of a NULL pointer to\n * av_dict_set(). 
NULL can be used as an empty dictionary wherever\n * a pointer to an AVDictionary is required.\n * Use av_dict_get() to retrieve an entry or iterate over all\n * entries and finally av_dict_free() to free the dictionary\n * and all its contents.\n *\n @code\n   AVDictionary *d = NULL;           // \"create\" an empty dictionary\n   AVDictionaryEntry *t = NULL;\n\n   av_dict_set(&d, \"foo\", \"bar\", 0); // add an entry\n\n   char *k = av_strdup(\"key\");       // if your strings are already allocated,\n   char *v = av_strdup(\"value\");     // you can avoid copying them like this\n   av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);\n\n   while (t = av_dict_get(d, \"\", t, AV_DICT_IGNORE_SUFFIX)) {\n       <....>                             // iterate over all entries in d\n   }\n   av_dict_free(&d);\n @endcode\n *\n */\n\n#define AV_DICT_MATCH_CASE      1   /**< Only get an entry with exact-case key match. Only relevant in av_dict_get(). */\n#define AV_DICT_IGNORE_SUFFIX   2   /**< Return first entry in a dictionary whose first part corresponds to the search key,\n                                         ignoring the suffix of the found key string. Only relevant in av_dict_get(). */\n#define AV_DICT_DONT_STRDUP_KEY 4   /**< Take ownership of a key that's been\n                                         allocated with av_malloc() or another memory allocation function. */\n#define AV_DICT_DONT_STRDUP_VAL 8   /**< Take ownership of a value that's been\n                                         allocated with av_malloc() or another memory allocation function. */\n#define AV_DICT_DONT_OVERWRITE 16   ///< Don't overwrite existing entries.\n#define AV_DICT_APPEND         32   /**< If the entry already exists, append to it.  Note that no\n                                      delimiter is added, the strings are simply concatenated. 
*/\n\ntypedef struct AVDictionaryEntry {\n    char *key;\n    char *value;\n} AVDictionaryEntry;\n\ntypedef struct AVDictionary AVDictionary;\n\n/**\n * Get a dictionary entry with matching key.\n *\n * The returned entry key or value must not be changed, or it will\n * cause undefined behavior.\n *\n * To iterate through all the dictionary entries, you can set the matching key\n * to the null string \"\" and set the AV_DICT_IGNORE_SUFFIX flag.\n *\n * @param prev Set to the previous matching element to find the next.\n *             If set to NULL the first matching element is returned.\n * @param key matching key\n * @param flags a collection of AV_DICT_* flags controlling how the entry is retrieved\n * @return found entry or NULL in case no matching entry was found in the dictionary\n */\nAVDictionaryEntry *av_dict_get(const AVDictionary *m, const char *key,\n                               const AVDictionaryEntry *prev, int flags);\n\n/**\n * Get number of entries in dictionary.\n *\n * @param m dictionary\n * @return  number of entries in dictionary\n */\nint av_dict_count(const AVDictionary *m);\n\n/**\n * Set the given entry in *pm, overwriting an existing entry.\n *\n * Note: If AV_DICT_DONT_STRDUP_KEY or AV_DICT_DONT_STRDUP_VAL is set,\n * these arguments will be freed on error.\n *\n * @param pm pointer to a pointer to a dictionary struct. 
If *pm is NULL\n * a dictionary struct is allocated and put in *pm.\n * @param key entry key to add to *pm (will be av_strduped depending on flags)\n * @param value entry value to add to *pm (will be av_strduped depending on flags).\n *        Passing a NULL value will cause an existing entry to be deleted.\n * @return >= 0 on success otherwise an error code <0\n */\nint av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags);\n\n/**\n * Convenience wrapper for av_dict_set that converts the value to a string\n * and stores it.\n *\n * Note: If AV_DICT_DONT_STRDUP_KEY is set, key will be freed on error.\n */\nint av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags);\n\n/**\n * Parse the key/value pairs list and add the parsed entries to a dictionary.\n *\n * In case of failure, all the successfully set entries are stored in\n * *pm. You may need to manually free the created dictionary.\n *\n * @param key_val_sep  a 0-terminated list of characters used to separate\n *                     key from value\n * @param pairs_sep    a 0-terminated list of characters used to separate\n *                     two pairs from each other\n * @param flags        flags to use when adding to dictionary.\n *                     AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL\n *                     are ignored since the key/value tokens will always\n *                     be duplicated.\n * @return             0 on success, negative AVERROR code on failure\n */\nint av_dict_parse_string(AVDictionary **pm, const char *str,\n                         const char *key_val_sep, const char *pairs_sep,\n                         int flags);\n\n/**\n * Copy entries from one AVDictionary struct into another.\n * @param dst pointer to a pointer to a AVDictionary struct. 
If *dst is NULL,\n *            this function will allocate a struct for you and put it in *dst\n * @param src pointer to source AVDictionary struct\n * @param flags flags to use when setting entries in *dst\n * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag\n */\nvoid av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags);\n\n/**\n * Free all the memory allocated for an AVDictionary struct\n * and all keys and values.\n */\nvoid av_dict_free(AVDictionary **m);\n\n/**\n * Get dictionary entries as a string.\n *\n * Create a string containing dictionary's entries.\n * Such string may be passed back to av_dict_parse_string().\n * @note String is escaped with backslashes ('\\').\n *\n * @param[in]  m             dictionary\n * @param[out] buffer        Pointer to buffer that will be allocated with string containg entries.\n *                           Buffer must be freed by the caller when is no longer needed.\n * @param[in]  key_val_sep   character used to separate key from value\n * @param[in]  pairs_sep     character used to separate two pairs from each other\n * @return                   >= 0 on success, negative on error\n * @warning Separators cannot be neither '\\\\' nor '\\0'. They also cannot be the same.\n */\nint av_dict_get_string(const AVDictionary *m, char **buffer,\n                       const char key_val_sep, const char pairs_sep);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_DICT_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/display.h",
    "content": "/*\n * Copyright (c) 2014 Vittorio Giovara <vittorio.giovara@gmail.com>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_DISPLAY_H\n#define AVUTIL_DISPLAY_H\n\n#include <stdint.h>\n\n/**\n * The display transformation matrix specifies an affine transformation that\n * should be applied to video frames for correct presentation. It is compatible\n * with the matrices stored in the ISO/IEC 14496-12 container format.\n *\n * The data is a 3x3 matrix represented as a 9-element array:\n *\n *                                  | a b u |\n *   (a, b, u, c, d, v, x, y, w) -> | c d v |\n *                                  | x y w |\n *\n * All numbers are stored in native endianness, as 16.16 fixed-point values,\n * except for u, v and w, which are stored as 2.30 fixed-point values.\n *\n * The transformation maps a point (p, q) in the source (pre-transformation)\n * frame to the point (p', q') in the destination (post-transformation) frame as\n * follows:\n *               | a b u |\n *   (p, q, 1) . 
| c d v | = z * (p', q', 1)\n *               | x y w |\n *\n * The transformation can also be more explicitly written in components as\n * follows:\n *   p' = (a * p + c * q + x) / z;\n *   q' = (b * p + d * q + y) / z;\n *   z  =  u * p + v * q + w\n */\n\n/**\n * Extract the rotation component of the transformation matrix.\n *\n * @param matrix the transformation matrix\n * @return the angle (in degrees) by which the transformation rotates the frame.\n *         The angle will be in range [-180.0, 180.0], or NaN if the matrix is\n *         singular.\n *\n * @note floating point numbers are inherently inexact, so callers are\n *       recommended to round the return value to nearest integer before use.\n */\ndouble av_display_rotation_get(const int32_t matrix[9]);\n\n/**\n * Initialize a transformation matrix describing a pure rotation by the\n * specified angle (in degrees).\n *\n * @param matrix an allocated transformation matrix (will be fully overwritten\n *               by this function)\n * @param angle rotation angle in degrees.\n */\nvoid av_display_rotation_set(int32_t matrix[9], double angle);\n\n/**\n * Flip the input matrix horizontally and/or vertically.\n *\n * @param matrix an allocated transformation matrix\n * @param hflip whether the matrix should be flipped horizontally\n * @param vflip whether the matrix should be flipped vertically\n */\nvoid av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip);\n\n#endif /* AVUTIL_DISPLAY_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/downmix_info.h",
    "content": "/*\n * Copyright (c) 2014 Tim Walker <tdskywalker@gmail.com>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_DOWNMIX_INFO_H\n#define AVUTIL_DOWNMIX_INFO_H\n\n#include \"frame.h\"\n\n/**\n * @file\n * audio downmix medatata\n */\n\n/**\n * @addtogroup lavu_audio\n * @{\n */\n\n/**\n * @defgroup downmix_info Audio downmix metadata\n * @{\n */\n\n/**\n * Possible downmix types.\n */\nenum AVDownmixType {\n    AV_DOWNMIX_TYPE_UNKNOWN, /**< Not indicated. */\n    AV_DOWNMIX_TYPE_LORO,    /**< Lo/Ro 2-channel downmix (Stereo). */\n    AV_DOWNMIX_TYPE_LTRT,    /**< Lt/Rt 2-channel downmix, Dolby Surround compatible. */\n    AV_DOWNMIX_TYPE_DPLII,   /**< Lt/Rt 2-channel downmix, Dolby Pro Logic II compatible. */\n    AV_DOWNMIX_TYPE_NB       /**< Number of downmix types. Not part of ABI. 
*/\n};\n\n/**\n * This structure describes optional metadata relevant to a downmix procedure.\n *\n * All fields are set by the decoder to the value indicated in the audio\n * bitstream (if present), or to a \"sane\" default otherwise.\n */\ntypedef struct AVDownmixInfo {\n    /**\n     * Type of downmix preferred by the mastering engineer.\n     */\n    enum AVDownmixType preferred_downmix_type;\n\n    /**\n     * Absolute scale factor representing the nominal level of the center\n     * channel during a regular downmix.\n     */\n    double center_mix_level;\n\n    /**\n     * Absolute scale factor representing the nominal level of the center\n     * channel during an Lt/Rt compatible downmix.\n     */\n    double center_mix_level_ltrt;\n\n    /**\n     * Absolute scale factor representing the nominal level of the surround\n     * channels during a regular downmix.\n     */\n    double surround_mix_level;\n\n    /**\n     * Absolute scale factor representing the nominal level of the surround\n     * channels during an Lt/Rt compatible downmix.\n     */\n    double surround_mix_level_ltrt;\n\n    /**\n     * Absolute scale factor representing the level at which the LFE data is\n     * mixed into L/R channels during downmixing.\n     */\n    double lfe_mix_level;\n} AVDownmixInfo;\n\n/**\n * Get a frame's AV_FRAME_DATA_DOWNMIX_INFO side data for editing.\n *\n * If the side data is absent, it is created and added to the frame.\n *\n * @param frame the frame for which the side data is to be obtained or created\n *\n * @return the AVDownmixInfo structure to be edited by the caller, or NULL if\n *         the structure cannot be allocated.\n */\nAVDownmixInfo *av_downmix_info_update_side_data(AVFrame *frame);\n\n/**\n * @}\n */\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_DOWNMIX_INFO_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/error.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * error code definitions\n */\n\n#ifndef AVUTIL_ERROR_H\n#define AVUTIL_ERROR_H\n\n#include <errno.h>\n#include <stddef.h>\n\n/**\n * @addtogroup lavu_error\n *\n * @{\n */\n\n\n/* error handling */\n#if EDOM > 0\n#define AVERROR(e) (-(e))   ///< Returns a negative error code from a POSIX error code, to return from library functions.\n#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value.\n#else\n/* Some platforms have E* and errno already negated. 
*/\n#define AVERROR(e) (e)\n#define AVUNERROR(e) (e)\n#endif\n\n#define FFERRTAG(a, b, c, d) (-(int)MKTAG(a, b, c, d))\n\n#define AVERROR_BSF_NOT_FOUND      FFERRTAG(0xF8,'B','S','F') ///< Bitstream filter not found\n#define AVERROR_BUG                FFERRTAG( 'B','U','G','!') ///< Internal bug, also see AVERROR_BUG2\n#define AVERROR_BUFFER_TOO_SMALL   FFERRTAG( 'B','U','F','S') ///< Buffer too small\n#define AVERROR_DECODER_NOT_FOUND  FFERRTAG(0xF8,'D','E','C') ///< Decoder not found\n#define AVERROR_DEMUXER_NOT_FOUND  FFERRTAG(0xF8,'D','E','M') ///< Demuxer not found\n#define AVERROR_ENCODER_NOT_FOUND  FFERRTAG(0xF8,'E','N','C') ///< Encoder not found\n#define AVERROR_EOF                FFERRTAG( 'E','O','F',' ') ///< End of file\n#define AVERROR_EXIT               FFERRTAG( 'E','X','I','T') ///< Immediate exit was requested; the called function should not be restarted\n#define AVERROR_EXTERNAL           FFERRTAG( 'E','X','T',' ') ///< Generic error in an external library\n#define AVERROR_FILTER_NOT_FOUND   FFERRTAG(0xF8,'F','I','L') ///< Filter not found\n#define AVERROR_INVALIDDATA        FFERRTAG( 'I','N','D','A') ///< Invalid data found when processing input\n#define AVERROR_MUXER_NOT_FOUND    FFERRTAG(0xF8,'M','U','X') ///< Muxer not found\n#define AVERROR_OPTION_NOT_FOUND   FFERRTAG(0xF8,'O','P','T') ///< Option not found\n#define AVERROR_PATCHWELCOME       FFERRTAG( 'P','A','W','E') ///< Not yet implemented in FFmpeg, patches welcome\n#define AVERROR_PROTOCOL_NOT_FOUND FFERRTAG(0xF8,'P','R','O') ///< Protocol not found\n\n#define AVERROR_STREAM_NOT_FOUND   FFERRTAG(0xF8,'S','T','R') ///< Stream not found\n/**\n * This is semantically identical to AVERROR_BUG\n * it has been introduced in Libav after our AVERROR_BUG and with a modified value.\n */\n#define AVERROR_BUG2               FFERRTAG( 'B','U','G',' ')\n#define AVERROR_UNKNOWN            FFERRTAG( 'U','N','K','N') ///< Unknown error, typically from an external library\n#define AVERROR_EXPERIMENTAL   
    (-0x2bb2afa8) ///< Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.\n#define AVERROR_INPUT_CHANGED      (-0x636e6701) ///< Input changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_OUTPUT_CHANGED)\n#define AVERROR_OUTPUT_CHANGED     (-0x636e6702) ///< Output changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_INPUT_CHANGED)\n/* HTTP & RTSP errors */\n#define AVERROR_HTTP_BAD_REQUEST   FFERRTAG(0xF8,'4','0','0')\n#define AVERROR_HTTP_UNAUTHORIZED  FFERRTAG(0xF8,'4','0','1')\n#define AVERROR_HTTP_FORBIDDEN     FFERRTAG(0xF8,'4','0','3')\n#define AVERROR_HTTP_NOT_FOUND     FFERRTAG(0xF8,'4','0','4')\n#define AVERROR_HTTP_OTHER_4XX     FFERRTAG(0xF8,'4','X','X')\n#define AVERROR_HTTP_SERVER_ERROR  FFERRTAG(0xF8,'5','X','X')\n\n#define AV_ERROR_MAX_STRING_SIZE 64\n\n/**\n * Put a description of the AVERROR code errnum in errbuf.\n * In case of failure the global variable errno is set to indicate the\n * error. 
Even in case of failure av_strerror() will print a generic\n * error message indicating the errnum provided to errbuf.\n *\n * @param errnum      error code to describe\n * @param errbuf      buffer to which description is written\n * @param errbuf_size the size in bytes of errbuf\n * @return 0 on success, a negative value if a description for errnum\n * cannot be found\n */\nint av_strerror(int errnum, char *errbuf, size_t errbuf_size);\n\n/**\n * Fill the provided buffer with a string containing an error string\n * corresponding to the AVERROR code errnum.\n *\n * @param errbuf         a buffer\n * @param errbuf_size    size in bytes of errbuf\n * @param errnum         error code to describe\n * @return the buffer in input, filled with the error description\n * @see av_strerror()\n */\nstatic inline char *av_make_error_string(char *errbuf, size_t errbuf_size, int errnum)\n{\n    av_strerror(errnum, errbuf, errbuf_size);\n    return errbuf;\n}\n\n/**\n * Convenience macro, the return value should be used only directly in\n * function arguments but never stand-alone.\n */\n#define av_err2str(errnum) \\\n    av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum)\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_ERROR_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/eval.h",
    "content": "/*\n * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * simple arithmetic expression evaluator\n */\n\n#ifndef AVUTIL_EVAL_H\n#define AVUTIL_EVAL_H\n\n#include \"avutil.h\"\n\ntypedef struct AVExpr AVExpr;\n\n/**\n * Parse and evaluate an expression.\n * Note, this is significantly slower than av_expr_eval().\n *\n * @param res a pointer to a double where is put the result value of\n * the expression, or NAN in case of error\n * @param s expression as a zero terminated string, for example \"1+2^3+5*5+sin(2/3)\"\n * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {\"PI\", \"E\", 0}\n * @param const_values a zero terminated array of values for the identifiers from const_names\n * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers\n * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument\n * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers\n * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments\n * @param opaque a pointer which will be passed 
to all functions from funcs1 and funcs2\n * @param log_ctx parent logging context\n * @return >= 0 in case of success, a negative value corresponding to an\n * AVERROR code otherwise\n */\nint av_expr_parse_and_eval(double *res, const char *s,\n                           const char * const *const_names, const double *const_values,\n                           const char * const *func1_names, double (* const *funcs1)(void *, double),\n                           const char * const *func2_names, double (* const *funcs2)(void *, double, double),\n                           void *opaque, int log_offset, void *log_ctx);\n\n/**\n * Parse an expression.\n *\n * @param expr a pointer where is put an AVExpr containing the parsed\n * value in case of successful parsing, or NULL otherwise.\n * The pointed to AVExpr must be freed with av_expr_free() by the user\n * when it is not needed anymore.\n * @param s expression as a zero terminated string, for example \"1+2^3+5*5+sin(2/3)\"\n * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {\"PI\", \"E\", 0}\n * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers\n * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument\n * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers\n * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments\n * @param log_ctx parent logging context\n * @return >= 0 in case of success, a negative value corresponding to an\n * AVERROR code otherwise\n */\nint av_expr_parse(AVExpr **expr, const char *s,\n                  const char * const *const_names,\n                  const char * const *func1_names, double (* const *funcs1)(void *, double),\n                  const char * const *func2_names, double (* const *funcs2)(void *, double, double),\n                  int log_offset, void *log_ctx);\n\n/**\n 
* Evaluate a previously parsed expression.\n *\n * @param const_values a zero terminated array of values for the identifiers from av_expr_parse() const_names\n * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2\n * @return the value of the expression\n */\ndouble av_expr_eval(AVExpr *e, const double *const_values, void *opaque);\n\n/**\n * Free a parsed expression previously created with av_expr_parse().\n */\nvoid av_expr_free(AVExpr *e);\n\n/**\n * Parse the string in numstr and return its value as a double. If\n * the string is empty, contains only whitespaces, or does not contain\n * an initial substring that has the expected syntax for a\n * floating-point number, no conversion is performed. In this case,\n * returns a value of zero and the value returned in tail is the value\n * of numstr.\n *\n * @param numstr a string representing a number, may contain one of\n * the International System number postfixes, for example 'K', 'M',\n * 'G'. If 'i' is appended after the postfix, powers of 2 are used\n * instead of powers of 10. The 'B' postfix multiplies the value for\n * 8, and can be appended after another postfix or used alone. This\n * allows using for example 'KB', 'MiB', 'G' and 'B' as postfix.\n * @param tail if non-NULL puts here the pointer to the char next\n * after the last parsed character\n */\ndouble av_strtod(const char *numstr, char **tail);\n\n#endif /* AVUTIL_EVAL_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/ffversion.h",
    "content": "#ifndef AVUTIL_FFVERSION_H\n#define AVUTIL_FFVERSION_H\n#define FFMPEG_VERSION \"2.6\"\n#endif /* AVUTIL_FFVERSION_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/fifo.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * a very simple circular buffer FIFO implementation\n */\n\n#ifndef AVUTIL_FIFO_H\n#define AVUTIL_FIFO_H\n\n#include <stdint.h>\n#include \"avutil.h\"\n#include \"attributes.h\"\n\ntypedef struct AVFifoBuffer {\n    uint8_t *buffer;\n    uint8_t *rptr, *wptr, *end;\n    uint32_t rndx, wndx;\n} AVFifoBuffer;\n\n/**\n * Initialize an AVFifoBuffer.\n * @param size of FIFO\n * @return AVFifoBuffer or NULL in case of memory allocation failure\n */\nAVFifoBuffer *av_fifo_alloc(unsigned int size);\n\n/**\n * Initialize an AVFifoBuffer.\n * @param nmemb number of elements\n * @param size  size of the single element\n * @return AVFifoBuffer or NULL in case of memory allocation failure\n */\nAVFifoBuffer *av_fifo_alloc_array(size_t nmemb, size_t size);\n\n/**\n * Free an AVFifoBuffer.\n * @param f AVFifoBuffer to free\n */\nvoid av_fifo_free(AVFifoBuffer *f);\n\n/**\n * Free an AVFifoBuffer and reset pointer to NULL.\n * @param f AVFifoBuffer to free\n */\nvoid av_fifo_freep(AVFifoBuffer **f);\n\n/**\n * Reset the AVFifoBuffer to the state right after av_fifo_alloc, in particular it is emptied.\n * @param f AVFifoBuffer to reset\n */\nvoid av_fifo_reset(AVFifoBuffer 
*f);\n\n/**\n * Return the amount of data in bytes in the AVFifoBuffer, that is the\n * amount of data you can read from it.\n * @param f AVFifoBuffer to read from\n * @return size\n */\nint av_fifo_size(const AVFifoBuffer *f);\n\n/**\n * Return the amount of space in bytes in the AVFifoBuffer, that is the\n * amount of data you can write into it.\n * @param f AVFifoBuffer to write into\n * @return size\n */\nint av_fifo_space(const AVFifoBuffer *f);\n\n/**\n * Feed data from an AVFifoBuffer to a user-supplied callback.\n * @param f AVFifoBuffer to read from\n * @param buf_size number of bytes to read\n * @param func generic read function\n * @param dest data destination\n */\nint av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int));\n\n/**\n * Feed data from a user-supplied callback to an AVFifoBuffer.\n * @param f AVFifoBuffer to write to\n * @param src data source; non-const since it may be used as a\n * modifiable context by the function defined in func\n * @param size number of bytes to write\n * @param func generic write function; the first parameter is src,\n * the second is dest_buf, the third is dest_buf_size.\n * func must return the number of bytes written to dest_buf, or <= 0 to\n * indicate no more data available to write.\n * If func is NULL, src is interpreted as a simple byte array for source data.\n * @return the number of bytes written to the FIFO\n */\nint av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int));\n\n/**\n * Resize an AVFifoBuffer.\n * In case of reallocation failure, the old FIFO is kept unchanged.\n *\n * @param f AVFifoBuffer to resize\n * @param size new AVFifoBuffer size in bytes\n * @return <0 for failure, >=0 otherwise\n */\nint av_fifo_realloc2(AVFifoBuffer *f, unsigned int size);\n\n/**\n * Enlarge an AVFifoBuffer.\n * In case of reallocation failure, the old FIFO is kept unchanged.\n * The new fifo size may be larger than the requested size.\n 
*\n * @param f AVFifoBuffer to resize\n * @param additional_space the amount of space in bytes to allocate in addition to av_fifo_size()\n * @return <0 for failure, >=0 otherwise\n */\nint av_fifo_grow(AVFifoBuffer *f, unsigned int additional_space);\n\n/**\n * Read and discard the specified amount of data from an AVFifoBuffer.\n * @param f AVFifoBuffer to read from\n * @param size amount of data to read in bytes\n */\nvoid av_fifo_drain(AVFifoBuffer *f, int size);\n\n/**\n * Return a pointer to the data stored in a FIFO buffer at a certain offset.\n * The FIFO buffer is not modified.\n *\n * @param f    AVFifoBuffer to peek at, f must be non-NULL\n * @param offs an offset in bytes, its absolute value must be less\n *             than the used buffer size or the returned pointer will\n *             point outside to the buffer data.\n *             The used buffer size can be checked with av_fifo_size().\n */\nstatic inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs)\n{\n    uint8_t *ptr = f->rptr + offs;\n    if (ptr >= f->end)\n        ptr = f->buffer + (ptr - f->end);\n    else if (ptr < f->buffer)\n        ptr = f->end - (f->buffer - ptr);\n    return ptr;\n}\n\n#endif /* AVUTIL_FIFO_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/file.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_FILE_H\n#define AVUTIL_FILE_H\n\n#include <stdint.h>\n\n#include \"avutil.h\"\n\n/**\n * @file\n * Misc file utilities.\n */\n\n/**\n * Read the file with name filename, and put its content in a newly\n * allocated buffer or map it with mmap() when available.\n * In case of success set *bufptr to the read or mmapped buffer, and\n * *size to the size in bytes of the buffer in *bufptr.\n * The returned buffer must be released with av_file_unmap().\n *\n * @param log_offset loglevel offset used for logging\n * @param log_ctx context used for logging\n * @return a non negative number in case of success, a negative value\n * corresponding to an AVERROR error code in case of failure\n */\nint av_file_map(const char *filename, uint8_t **bufptr, size_t *size,\n                int log_offset, void *log_ctx);\n\n/**\n * Unmap or free the buffer bufptr created by av_file_map().\n *\n * @param size size in bytes of bufptr, must be the same as returned\n * by av_file_map()\n */\nvoid av_file_unmap(uint8_t *bufptr, size_t size);\n\n/**\n * Wrapper to work around the lack of mkstemp() on mingw.\n * Also, tries to create file in /tmp first, if possible.\n * *prefix can be a character 
constant; *filename will be allocated internally.\n * @return file descriptor of opened file (or negative value corresponding to an\n * AVERROR code on error)\n * and opened file name in **filename.\n * @note On very old libcs it is necessary to set a secure umask before\n *       calling this, av_tempfile() can't call umask itself as it is used in\n *       libraries and could interfere with the calling application.\n */\nint av_tempfile(const char *prefix, char **filename, int log_offset, void *log_ctx);\n\n#endif /* AVUTIL_FILE_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/frame.h",
    "content": "/*\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * @ingroup lavu_frame\n * reference-counted frame API\n */\n\n#ifndef AVUTIL_FRAME_H\n#define AVUTIL_FRAME_H\n\n#include <stdint.h>\n\n#include \"avutil.h\"\n#include \"buffer.h\"\n#include \"dict.h\"\n#include \"rational.h\"\n#include \"samplefmt.h\"\n#include \"pixfmt.h\"\n#include \"version.h\"\n\n\n/**\n * @defgroup lavu_frame AVFrame\n * @ingroup lavu_data\n *\n * @{\n * AVFrame is an abstraction for reference-counted raw multimedia data.\n */\n\nenum AVFrameSideDataType {\n    /**\n     * The data is the AVPanScan struct defined in libavcodec.\n     */\n    AV_FRAME_DATA_PANSCAN,\n    /**\n     * ATSC A53 Part 4 Closed Captions.\n     * A53 CC bitstream is stored as uint8_t in AVFrameSideData.data.\n     * The number of bytes of CC data is AVFrameSideData.size.\n     */\n    AV_FRAME_DATA_A53_CC,\n    /**\n     * Stereoscopic 3d metadata.\n     * The data is the AVStereo3D struct defined in libavutil/stereo3d.h.\n     */\n    AV_FRAME_DATA_STEREO3D,\n    /**\n     * The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.\n     */\n    AV_FRAME_DATA_MATRIXENCODING,\n    /**\n     * Metadata relevant to a downmix procedure.\n     * The 
data is the AVDownmixInfo struct defined in libavutil/downmix_info.h.\n     */\n    AV_FRAME_DATA_DOWNMIX_INFO,\n    /**\n     * ReplayGain information in the form of the AVReplayGain struct.\n     */\n    AV_FRAME_DATA_REPLAYGAIN,\n    /**\n     * This side data contains a 3x3 transformation matrix describing an affine\n     * transformation that needs to be applied to the frame for correct\n     * presentation.\n     *\n     * See libavutil/display.h for a detailed description of the data.\n     */\n    AV_FRAME_DATA_DISPLAYMATRIX,\n    /**\n     * Active Format Description data consisting of a single byte as specified\n     * in ETSI TS 101 154 using AVActiveFormatDescription enum.\n     */\n    AV_FRAME_DATA_AFD,\n    /**\n     * Motion vectors exported by some codecs (on demand through the export_mvs\n     * flag set in the libavcodec AVCodecContext flags2 option).\n     * The data is the AVMotionVector struct defined in\n     * libavutil/motion_vector.h.\n     */\n    AV_FRAME_DATA_MOTION_VECTORS,\n    /**\n     * Recommends skipping the specified number of samples. 
This is exported\n     * only if the \"skip_manual\" AVOption is set in libavcodec.\n     * This has the same format as AV_PKT_DATA_SKIP_SAMPLES.\n     * @code\n     * u32le number of samples to skip from start of this packet\n     * u32le number of samples to skip from end of this packet\n     * u8    reason for start skip\n     * u8    reason for end   skip (0=padding silence, 1=convergence)\n     * @endcode\n     */\n    AV_FRAME_DATA_SKIP_SAMPLES,\n\n    /**\n     * This side data must be associated with an audio frame and corresponds to\n     * enum AVAudioServiceType defined in avcodec.h.\n     */\n    AV_FRAME_DATA_AUDIO_SERVICE_TYPE,\n};\n\nenum AVActiveFormatDescription {\n    AV_AFD_SAME         = 8,\n    AV_AFD_4_3          = 9,\n    AV_AFD_16_9         = 10,\n    AV_AFD_14_9         = 11,\n    AV_AFD_4_3_SP_14_9  = 13,\n    AV_AFD_16_9_SP_14_9 = 14,\n    AV_AFD_SP_4_3       = 15,\n};\n\ntypedef struct AVFrameSideData {\n    enum AVFrameSideDataType type;\n    uint8_t *data;\n    int      size;\n    AVDictionary *metadata;\n} AVFrameSideData;\n\n/**\n * This structure describes decoded (raw) audio or video data.\n *\n * AVFrame must be allocated using av_frame_alloc(). Note that this only\n * allocates the AVFrame itself, the buffers for the data must be managed\n * through other means (see below).\n * AVFrame must be freed with av_frame_free().\n *\n * AVFrame is typically allocated once and then reused multiple times to hold\n * different data (e.g. a single AVFrame to hold frames received from a\n * decoder). In such a case, av_frame_unref() will free any references held by\n * the frame and reset it to its original clean state before it\n * is reused again.\n *\n * The data described by an AVFrame is usually reference counted through the\n * AVBuffer API. The underlying buffer references are stored in AVFrame.buf /\n * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at\n * least one reference is set, i.e. 
if AVFrame.buf[0] != NULL. In such a case,\n * every single data plane must be contained in one of the buffers in\n * AVFrame.buf or AVFrame.extended_buf.\n * There may be a single buffer for all the data, or one separate buffer for\n * each plane, or anything in between.\n *\n * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added\n * to the end with a minor bump.\n * Similarly fields that are marked as to be only accessed by\n * av_opt_ptr() can be reordered. This allows 2 forks to add fields\n * without breaking compatibility with each other.\n */\ntypedef struct AVFrame {\n#define AV_NUM_DATA_POINTERS 8\n    /**\n     * pointer to the picture/channel planes.\n     * This might be different from the first allocated byte\n     *\n     * Some decoders access areas outside 0,0 - width,height, please\n     * see avcodec_align_dimensions2(). Some filters and swscale can read\n     * up to 16 bytes beyond the planes, if these filters are to be used,\n     * then 16 extra bytes must be allocated.\n     */\n    uint8_t *data[AV_NUM_DATA_POINTERS];\n\n    /**\n     * For video, size in bytes of each picture line.\n     * For audio, size in bytes of each plane.\n     *\n     * For audio, only linesize[0] may be set. 
For planar audio, each channel\n     * plane must be the same size.\n     *\n     * For video the linesizes should be multiples of the CPUs alignment\n     * preference, this is 16 or 32 for modern desktop CPUs.\n     * Some code requires such alignment other code can be slower without\n     * correct alignment, for yet other it makes no difference.\n     *\n     * @note The linesize may be larger than the size of usable data -- there\n     * may be extra padding present for performance reasons.\n     */\n    int linesize[AV_NUM_DATA_POINTERS];\n\n    /**\n     * pointers to the data planes/channels.\n     *\n     * For video, this should simply point to data[].\n     *\n     * For planar audio, each channel has a separate data pointer, and\n     * linesize[0] contains the size of each channel buffer.\n     * For packed audio, there is just one data pointer, and linesize[0]\n     * contains the total size of the buffer for all channels.\n     *\n     * Note: Both data and extended_data should always be set in a valid frame,\n     * but for planar audio with more channels that can fit in data,\n     * extended_data must be used in order to access all channels.\n     */\n    uint8_t **extended_data;\n\n    /**\n     * width and height of the video frame\n     */\n    int width, height;\n\n    /**\n     * number of audio samples (per channel) described by this frame\n     */\n    int nb_samples;\n\n    /**\n     * format of the frame, -1 if unknown or unset\n     * Values correspond to enum AVPixelFormat for video frames,\n     * enum AVSampleFormat for audio)\n     */\n    int format;\n\n    /**\n     * 1 -> keyframe, 0-> not\n     */\n    int key_frame;\n\n    /**\n     * Picture type of the frame.\n     */\n    enum AVPictureType pict_type;\n\n#if FF_API_AVFRAME_LAVC\n    attribute_deprecated\n    uint8_t *base[AV_NUM_DATA_POINTERS];\n#endif\n\n    /**\n     * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.\n     */\n    AVRational 
sample_aspect_ratio;\n\n    /**\n     * Presentation timestamp in time_base units (time when frame should be shown to user).\n     */\n    int64_t pts;\n\n    /**\n     * PTS copied from the AVPacket that was decoded to produce this frame.\n     */\n    int64_t pkt_pts;\n\n    /**\n     * DTS copied from the AVPacket that triggered returning this frame. (if frame threading isn't used)\n     * This is also the Presentation time of this AVFrame calculated from\n     * only AVPacket.dts values without pts values.\n     */\n    int64_t pkt_dts;\n\n    /**\n     * picture number in bitstream order\n     */\n    int coded_picture_number;\n    /**\n     * picture number in display order\n     */\n    int display_picture_number;\n\n    /**\n     * quality (between 1 (good) and FF_LAMBDA_MAX (bad))\n     */\n    int quality;\n\n#if FF_API_AVFRAME_LAVC\n    attribute_deprecated\n    int reference;\n\n    /**\n     * QP table\n     */\n    attribute_deprecated\n    int8_t *qscale_table;\n    /**\n     * QP store stride\n     */\n    attribute_deprecated\n    int qstride;\n\n    attribute_deprecated\n    int qscale_type;\n\n    /**\n     * mbskip_table[mb]>=1 if MB didn't change\n     * stride= mb_width = (width+15)>>4\n     */\n    attribute_deprecated\n    uint8_t *mbskip_table;\n\n    /**\n     * motion vector table\n     * @code\n     * example:\n     * int mv_sample_log2= 4 - motion_subsample_log2;\n     * int mb_width= (width+15)>>4;\n     * int mv_stride= (mb_width << mv_sample_log2) + 1;\n     * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];\n     * @endcode\n     */\n    int16_t (*motion_val[2])[2];\n\n    /**\n     * macroblock type table\n     * mb_type_base + mb_width + 2\n     */\n    attribute_deprecated\n    uint32_t *mb_type;\n\n    /**\n     * DCT coefficients\n     */\n    attribute_deprecated\n    short *dct_coeff;\n\n    /**\n     * motion reference frame index\n     * the order in which these are stored can depend on the codec.\n     */\n    
attribute_deprecated\n    int8_t *ref_index[2];\n#endif\n\n    /**\n     * for some private data of the user\n     */\n    void *opaque;\n\n    /**\n     * error\n     */\n    uint64_t error[AV_NUM_DATA_POINTERS];\n\n#if FF_API_AVFRAME_LAVC\n    attribute_deprecated\n    int type;\n#endif\n\n    /**\n     * When decoding, this signals how much the picture must be delayed.\n     * extra_delay = repeat_pict / (2*fps)\n     */\n    int repeat_pict;\n\n    /**\n     * The content of the picture is interlaced.\n     */\n    int interlaced_frame;\n\n    /**\n     * If the content is interlaced, is top field displayed first.\n     */\n    int top_field_first;\n\n    /**\n     * Tell user application that palette has changed from previous frame.\n     */\n    int palette_has_changed;\n\n#if FF_API_AVFRAME_LAVC\n    attribute_deprecated\n    int buffer_hints;\n\n    /**\n     * Pan scan.\n     */\n    attribute_deprecated\n    struct AVPanScan *pan_scan;\n#endif\n\n    /**\n     * reordered opaque 64bit (generally an integer or a double precision float\n     * PTS but can be anything).\n     * The user sets AVCodecContext.reordered_opaque to represent the input at\n     * that time,\n     * the decoder reorders values as needed and sets AVFrame.reordered_opaque\n     * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque\n     * @deprecated in favor of pkt_pts\n     */\n    int64_t reordered_opaque;\n\n#if FF_API_AVFRAME_LAVC\n    /**\n     * @deprecated this field is unused\n     */\n    attribute_deprecated void *hwaccel_picture_private;\n\n    attribute_deprecated\n    struct AVCodecContext *owner;\n    attribute_deprecated\n    void *thread_opaque;\n\n    /**\n     * log2 of the size of the block which a single vector in motion_val represents:\n     * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)\n     */\n    uint8_t motion_subsample_log2;\n#endif\n\n    /**\n     * Sample rate of the audio data.\n     */\n    int sample_rate;\n\n    /**\n   
  * Channel layout of the audio data.\n     */\n    uint64_t channel_layout;\n\n    /**\n     * AVBuffer references backing the data for this frame. If all elements of\n     * this array are NULL, then this frame is not reference counted. This array\n     * must be filled contiguously -- if buf[i] is non-NULL then buf[j] must\n     * also be non-NULL for all j < i.\n     *\n     * There may be at most one AVBuffer per data plane, so for video this array\n     * always contains all the references. For planar audio with more than\n     * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in\n     * this array. Then the extra AVBufferRef pointers are stored in the\n     * extended_buf array.\n     */\n    AVBufferRef *buf[AV_NUM_DATA_POINTERS];\n\n    /**\n     * For planar audio which requires more than AV_NUM_DATA_POINTERS\n     * AVBufferRef pointers, this array will hold all the references which\n     * cannot fit into AVFrame.buf.\n     *\n     * Note that this is different from AVFrame.extended_data, which always\n     * contains all the pointers. This array only contains the extra pointers,\n     * which cannot fit into AVFrame.buf.\n     *\n     * This array is always allocated using av_malloc() by whoever constructs\n     * the frame. It is freed in av_frame_unref().\n     */\n    AVBufferRef **extended_buf;\n    /**\n     * Number of elements in extended_buf.\n     */\n    int        nb_extended_buf;\n\n    AVFrameSideData **side_data;\n    int            nb_side_data;\n\n/**\n * @defgroup lavu_frame_flags AV_FRAME_FLAGS\n * Flags describing additional frame properties.\n *\n * @{\n */\n\n/**\n * The frame data may be corrupted, e.g. 
due to decoding errors.\n */\n#define AV_FRAME_FLAG_CORRUPT       (1 << 0)\n/**\n * @}\n */\n\n    /**\n     * Frame flags, a combination of @ref lavu_frame_flags\n     */\n    int flags;\n\n    /**\n     * MPEG vs JPEG YUV range.\n     * It must be accessed using av_frame_get_color_range() and\n     * av_frame_set_color_range().\n     * - encoding: Set by user\n     * - decoding: Set by libavcodec\n     */\n    enum AVColorRange color_range;\n\n    enum AVColorPrimaries color_primaries;\n\n    enum AVColorTransferCharacteristic color_trc;\n\n    /**\n     * YUV colorspace type.\n     * It must be accessed using av_frame_get_colorspace() and\n     * av_frame_set_colorspace().\n     * - encoding: Set by user\n     * - decoding: Set by libavcodec\n     */\n    enum AVColorSpace colorspace;\n\n    enum AVChromaLocation chroma_location;\n\n    /**\n     * frame timestamp estimated using various heuristics, in stream time base\n     * Code outside libavcodec should access this field using:\n     * av_frame_get_best_effort_timestamp(frame)\n     * - encoding: unused\n     * - decoding: set by libavcodec, read by user.\n     */\n    int64_t best_effort_timestamp;\n\n    /**\n     * reordered pos from the last AVPacket that has been input into the decoder\n     * Code outside libavcodec should access this field using:\n     * av_frame_get_pkt_pos(frame)\n     * - encoding: unused\n     * - decoding: Read by user.\n     */\n    int64_t pkt_pos;\n\n    /**\n     * duration of the corresponding packet, expressed in\n     * AVStream->time_base units, 0 if unknown.\n     * Code outside libavcodec should access this field using:\n     * av_frame_get_pkt_duration(frame)\n     * - encoding: unused\n     * - decoding: Read by user.\n     */\n    int64_t pkt_duration;\n\n    /**\n     * metadata.\n     * Code outside libavcodec should access this field using:\n     * av_frame_get_metadata(frame)\n     * - encoding: Set by user.\n     * - decoding: Set by libavcodec.\n     */\n    
AVDictionary *metadata;\n\n    /**\n     * decode error flags of the frame, set to a combination of\n     * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there\n     * were errors during the decoding.\n     * Code outside libavcodec should access this field using:\n     * av_frame_get_decode_error_flags(frame)\n     * - encoding: unused\n     * - decoding: set by libavcodec, read by user.\n     */\n    int decode_error_flags;\n#define FF_DECODE_ERROR_INVALID_BITSTREAM   1\n#define FF_DECODE_ERROR_MISSING_REFERENCE   2\n\n    /**\n     * number of audio channels, only used for audio.\n     * Code outside libavcodec should access this field using:\n     * av_frame_get_channels(frame)\n     * - encoding: unused\n     * - decoding: Read by user.\n     */\n    int channels;\n\n    /**\n     * size of the corresponding packet containing the compressed\n     * frame. It must be accessed using av_frame_get_pkt_size() and\n     * av_frame_set_pkt_size().\n     * It is set to a negative value if unknown.\n     * - encoding: unused\n     * - decoding: set by libavcodec, read by user.\n     */\n    int pkt_size;\n\n    /**\n     * Not to be accessed directly from outside libavutil\n     */\n    AVBufferRef *qp_table_buf;\n} AVFrame;\n\n/**\n * Accessors for some AVFrame fields.\n * The position of these field in the structure is not part of the ABI,\n * they should not be accessed directly outside libavcodec.\n */\nint64_t av_frame_get_best_effort_timestamp(const AVFrame *frame);\nvoid    av_frame_set_best_effort_timestamp(AVFrame *frame, int64_t val);\nint64_t av_frame_get_pkt_duration         (const AVFrame *frame);\nvoid    av_frame_set_pkt_duration         (AVFrame *frame, int64_t val);\nint64_t av_frame_get_pkt_pos              (const AVFrame *frame);\nvoid    av_frame_set_pkt_pos              (AVFrame *frame, int64_t val);\nint64_t av_frame_get_channel_layout       (const AVFrame *frame);\nvoid    av_frame_set_channel_layout       (AVFrame *frame, 
int64_t val);\nint     av_frame_get_channels             (const AVFrame *frame);\nvoid    av_frame_set_channels             (AVFrame *frame, int     val);\nint     av_frame_get_sample_rate          (const AVFrame *frame);\nvoid    av_frame_set_sample_rate          (AVFrame *frame, int     val);\nAVDictionary *av_frame_get_metadata       (const AVFrame *frame);\nvoid          av_frame_set_metadata       (AVFrame *frame, AVDictionary *val);\nint     av_frame_get_decode_error_flags   (const AVFrame *frame);\nvoid    av_frame_set_decode_error_flags   (AVFrame *frame, int     val);\nint     av_frame_get_pkt_size(const AVFrame *frame);\nvoid    av_frame_set_pkt_size(AVFrame *frame, int val);\nAVDictionary **avpriv_frame_get_metadatap(AVFrame *frame);\nint8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type);\nint av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int type);\nenum AVColorSpace av_frame_get_colorspace(const AVFrame *frame);\nvoid    av_frame_set_colorspace(AVFrame *frame, enum AVColorSpace val);\nenum AVColorRange av_frame_get_color_range(const AVFrame *frame);\nvoid    av_frame_set_color_range(AVFrame *frame, enum AVColorRange val);\n\n/**\n * Get the name of a colorspace.\n * @return a static string identifying the colorspace; can be NULL.\n */\nconst char *av_get_colorspace_name(enum AVColorSpace val);\n\n/**\n * Allocate an AVFrame and set its fields to default values.  The resulting\n * struct must be freed using av_frame_free().\n *\n * @return An AVFrame filled with default values or NULL on failure.\n *\n * @note this only allocates the AVFrame itself, not the data buffers. Those\n * must be allocated through other means, e.g. with av_frame_get_buffer() or\n * manually.\n */\nAVFrame *av_frame_alloc(void);\n\n/**\n * Free the frame and any dynamically allocated objects in it,\n * e.g. extended_data. If the frame is reference counted, it will be\n * unreferenced first.\n *\n * @param frame frame to be freed. 
The pointer will be set to NULL.\n */\nvoid av_frame_free(AVFrame **frame);\n\n/**\n * Set up a new reference to the data described by the source frame.\n *\n * Copy frame properties from src to dst and create a new reference for each\n * AVBufferRef from src.\n *\n * If src is not reference counted, new buffers are allocated and the data is\n * copied.\n *\n * @return 0 on success, a negative AVERROR on error\n */\nint av_frame_ref(AVFrame *dst, const AVFrame *src);\n\n/**\n * Create a new frame that references the same data as src.\n *\n * This is a shortcut for av_frame_alloc()+av_frame_ref().\n *\n * @return newly created AVFrame on success, NULL on error.\n */\nAVFrame *av_frame_clone(const AVFrame *src);\n\n/**\n * Unreference all the buffers referenced by frame and reset the frame fields.\n */\nvoid av_frame_unref(AVFrame *frame);\n\n/**\n * Move everything contained in src to dst and reset src.\n */\nvoid av_frame_move_ref(AVFrame *dst, AVFrame *src);\n\n/**\n * Allocate new buffer(s) for audio or video data.\n *\n * The following fields must be set on frame before calling this function:\n * - format (pixel format for video, sample format for audio)\n * - width and height for video\n * - nb_samples and channel_layout for audio\n *\n * This function will fill AVFrame.data and AVFrame.buf arrays and, if\n * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf.\n * For planar formats, one buffer will be allocated for each plane.\n *\n * @param frame frame in which to store the new buffers.\n * @param align required buffer size alignment\n *\n * @return 0 on success, a negative AVERROR on error.\n */\nint av_frame_get_buffer(AVFrame *frame, int align);\n\n/**\n * Check if the frame data is writable.\n *\n * @return A positive value if the frame data is writable (which is true if and\n * only if each of the underlying buffers has only one reference, namely the one\n * stored in this frame). 
Return 0 otherwise.\n *\n * If 1 is returned the answer is valid until av_buffer_ref() is called on any\n * of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly).\n *\n * @see av_frame_make_writable(), av_buffer_is_writable()\n */\nint av_frame_is_writable(AVFrame *frame);\n\n/**\n * Ensure that the frame data is writable, avoiding data copy if possible.\n *\n * Do nothing if the frame is writable, allocate new buffers and copy the data\n * if it is not.\n *\n * @return 0 on success, a negative AVERROR on error.\n *\n * @see av_frame_is_writable(), av_buffer_is_writable(),\n * av_buffer_make_writable()\n */\nint av_frame_make_writable(AVFrame *frame);\n\n/**\n * Copy the frame data from src to dst.\n *\n * This function does not allocate anything, dst must be already initialized and\n * allocated with the same parameters as src.\n *\n * This function only copies the frame data (i.e. the contents of the data /\n * extended data arrays), not any other properties.\n *\n * @return >= 0 on success, a negative AVERROR on error.\n */\nint av_frame_copy(AVFrame *dst, const AVFrame *src);\n\n/**\n * Copy only \"metadata\" fields from src to dst.\n *\n * Metadata for the purpose of this function are those fields that do not affect\n * the data layout in the buffers.  E.g. 
pts, sample rate (for audio) or sample\n * aspect ratio (for video), but not width/height or channel layout.\n * Side data is also copied.\n */\nint av_frame_copy_props(AVFrame *dst, const AVFrame *src);\n\n/**\n * Get the buffer reference a given data plane is stored in.\n *\n * @param plane index of the data plane of interest in frame->extended_data.\n *\n * @return the buffer reference that contains the plane or NULL if the input\n * frame is not valid.\n */\nAVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane);\n\n/**\n * Add a new side data to a frame.\n *\n * @param frame a frame to which the side data should be added\n * @param type type of the added side data\n * @param size size of the side data\n *\n * @return newly added side data on success, NULL on error\n */\nAVFrameSideData *av_frame_new_side_data(AVFrame *frame,\n                                        enum AVFrameSideDataType type,\n                                        int size);\n\n/**\n * @return a pointer to the side data of a given type on success, NULL if there\n * is no side data with such type in this frame.\n */\nAVFrameSideData *av_frame_get_side_data(const AVFrame *frame,\n                                        enum AVFrameSideDataType type);\n\n/**\n * If side data of the supplied type exists in the frame, free it and remove it\n * from the frame.\n */\nvoid av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type);\n\n/**\n * @return a string identifying the side data type\n */\nconst char *av_frame_side_data_name(enum AVFrameSideDataType type);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_FRAME_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/hash.h",
    "content": "/*\n * Copyright (C) 2013 Reimar Döffinger <Reimar.Doeffinger@gmx.de>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_HASH_H\n#define AVUTIL_HASH_H\n\n#include <stdint.h>\n\nstruct AVHashContext;\n\n/**\n * Allocate a hash context for the algorithm specified by name.\n *\n * @return  >= 0 for success, a negative error code for failure\n * @note  The context is not initialized, you must call av_hash_init().\n */\nint av_hash_alloc(struct AVHashContext **ctx, const char *name);\n\n/**\n * Get the names of available hash algorithms.\n *\n * This function can be used to enumerate the algorithms.\n *\n * @param i  index of the hash algorithm, starting from 0\n * @return   a pointer to a static string or NULL if i is out of range\n */\nconst char *av_hash_names(int i);\n\n/**\n * Get the name of the algorithm corresponding to the given hash context.\n */\nconst char *av_hash_get_name(const struct AVHashContext *ctx);\n\n/**\n * Maximum value that av_hash_get_size will currently return.\n *\n * You can use this if you absolutely want or need to use static allocation\n * and are fine with not supporting hashes newly added to libavutil without\n * recompilation.\n * Note that you still need to check against 
av_hash_get_size, adding new hashes\n * with larger sizes will not be considered an ABI change and should not cause\n * your code to overflow a buffer.\n */\n#define AV_HASH_MAX_SIZE 64\n\n/**\n * Get the size of the resulting hash value in bytes.\n *\n * The pointer passed to av_hash_final have space for at least this many bytes.\n */\nint av_hash_get_size(const struct AVHashContext *ctx);\n\n/**\n * Initialize or reset a hash context.\n */\nvoid av_hash_init(struct AVHashContext *ctx);\n\n/**\n * Update a hash context with additional data.\n */\nvoid av_hash_update(struct AVHashContext *ctx, const uint8_t *src, int len);\n\n/**\n * Finalize a hash context and compute the actual hash value.\n */\nvoid av_hash_final(struct AVHashContext *ctx, uint8_t *dst);\n\n/**\n * Finalize a hash context and compute the actual hash value.\n * If size is smaller than the hash size, the hash is truncated;\n * if size is larger, the buffer is padded with 0.\n */\nvoid av_hash_final_bin(struct AVHashContext *ctx, uint8_t *dst, int size);\n\n/**\n * Finalize a hash context and compute the actual hash value as a hex string.\n * The string is always 0-terminated.\n * If size is smaller than 2 * hash_size + 1, the hex string is truncated.\n */\nvoid av_hash_final_hex(struct AVHashContext *ctx, uint8_t *dst, int size);\n\n/**\n * Finalize a hash context and compute the actual hash value as a base64 string.\n * The string is always 0-terminated.\n * If size is smaller than AV_BASE64_SIZE(hash_size), the base64 string is\n * truncated.\n */\nvoid av_hash_final_b64(struct AVHashContext *ctx, uint8_t *dst, int size);\n\n/**\n * Free hash context.\n */\nvoid av_hash_freep(struct AVHashContext **ctx);\n\n#endif /* AVUTIL_HASH_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/hmac.h",
    "content": "/*\n * Copyright (C) 2012 Martin Storsjo\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_HMAC_H\n#define AVUTIL_HMAC_H\n\n#include <stdint.h>\n\n/**\n * @defgroup lavu_hmac HMAC\n * @ingroup lavu_crypto\n * @{\n */\n\nenum AVHMACType {\n    AV_HMAC_MD5,\n    AV_HMAC_SHA1,\n    AV_HMAC_SHA224 = 10,\n    AV_HMAC_SHA256,\n    AV_HMAC_SHA384,\n    AV_HMAC_SHA512,\n};\n\ntypedef struct AVHMAC AVHMAC;\n\n/**\n * Allocate an AVHMAC context.\n * @param type The hash function used for the HMAC.\n */\nAVHMAC *av_hmac_alloc(enum AVHMACType type);\n\n/**\n * Free an AVHMAC context.\n * @param ctx The context to free, may be NULL\n */\nvoid av_hmac_free(AVHMAC *ctx);\n\n/**\n * Initialize an AVHMAC context with an authentication key.\n * @param ctx    The HMAC context\n * @param key    The authentication key\n * @param keylen The length of the key, in bytes\n */\nvoid av_hmac_init(AVHMAC *ctx, const uint8_t *key, unsigned int keylen);\n\n/**\n * Hash data with the HMAC.\n * @param ctx  The HMAC context\n * @param data The data to hash\n * @param len  The length of the data, in bytes\n */\nvoid av_hmac_update(AVHMAC *ctx, const uint8_t *data, unsigned int len);\n\n/**\n * Finish hashing and output the HMAC digest.\n * @param 
ctx    The HMAC context\n * @param out    The output buffer to write the digest into\n * @param outlen The length of the out buffer, in bytes\n * @return       The number of bytes written to out, or a negative error code.\n */\nint av_hmac_final(AVHMAC *ctx, uint8_t *out, unsigned int outlen);\n\n/**\n * Hash an array of data with a key.\n * @param ctx    The HMAC context\n * @param data   The data to hash\n * @param len    The length of the data, in bytes\n * @param key    The authentication key\n * @param keylen The length of the key, in bytes\n * @param out    The output buffer to write the digest into\n * @param outlen The length of the out buffer, in bytes\n * @return       The number of bytes written to out, or a negative error code.\n */\nint av_hmac_calc(AVHMAC *ctx, const uint8_t *data, unsigned int len,\n                 const uint8_t *key, unsigned int keylen,\n                 uint8_t *out, unsigned int outlen);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_HMAC_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/imgutils.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_IMGUTILS_H\n#define AVUTIL_IMGUTILS_H\n\n/**\n * @file\n * misc image utilities\n *\n * @addtogroup lavu_picture\n * @{\n */\n\n#include \"avutil.h\"\n#include \"pixdesc.h\"\n#include \"rational.h\"\n\n/**\n * Compute the max pixel step for each plane of an image with a\n * format described by pixdesc.\n *\n * The pixel step is the distance in bytes between the first byte of\n * the group of bytes which describe a pixel component and the first\n * byte of the successive group in the same plane for the same\n * component.\n *\n * @param max_pixsteps an array which is filled with the max pixel step\n * for each plane. Since a plane may contain different pixel\n * components, the computed max_pixsteps[plane] is relative to the\n * component in the plane with the max pixel step.\n * @param max_pixstep_comps an array which is filled with the component\n * for each plane which has the max pixel step. 
May be NULL.\n */\nvoid av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4],\n                                const AVPixFmtDescriptor *pixdesc);\n\n/**\n * Compute the size of an image line with format pix_fmt and width\n * width for the plane plane.\n *\n * @return the computed size in bytes\n */\nint av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane);\n\n/**\n * Fill plane linesizes for an image with pixel format pix_fmt and\n * width width.\n *\n * @param linesizes array to be filled with the linesize for each plane\n * @return >= 0 in case of success, a negative error code otherwise\n */\nint av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width);\n\n/**\n * Fill plane data pointers for an image with pixel format pix_fmt and\n * height height.\n *\n * @param data pointers array to be filled with the pointer for each image plane\n * @param ptr the pointer to a buffer which will contain the image\n * @param linesizes the array containing the linesize for each\n * plane, should be filled by av_image_fill_linesizes()\n * @return the size in bytes required for the image buffer, a negative\n * error code in case of failure\n */\nint av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height,\n                           uint8_t *ptr, const int linesizes[4]);\n\n/**\n * Allocate an image with size w and h and pixel format pix_fmt, and\n * fill pointers and linesizes accordingly.\n * The allocated image buffer has to be freed by using\n * av_freep(&pointers[0]).\n *\n * @param align the value to use for buffer size alignment\n * @return the size in bytes required for the image buffer, a negative\n * error code in case of failure\n */\nint av_image_alloc(uint8_t *pointers[4], int linesizes[4],\n                   int w, int h, enum AVPixelFormat pix_fmt, int align);\n\n/**\n * Copy image plane from src to dst.\n * That is, copy \"height\" number of lines of \"bytewidth\" bytes 
each.\n * The first byte of each successive line is separated by *_linesize\n * bytes.\n *\n * bytewidth must be contained by both absolute values of dst_linesize\n * and src_linesize, otherwise the function behavior is undefined.\n *\n * @param dst_linesize linesize for the image plane in dst\n * @param src_linesize linesize for the image plane in src\n */\nvoid av_image_copy_plane(uint8_t       *dst, int dst_linesize,\n                         const uint8_t *src, int src_linesize,\n                         int bytewidth, int height);\n\n/**\n * Copy image in src_data to dst_data.\n *\n * @param dst_linesizes linesizes for the image in dst_data\n * @param src_linesizes linesizes for the image in src_data\n */\nvoid av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4],\n                   const uint8_t *src_data[4], const int src_linesizes[4],\n                   enum AVPixelFormat pix_fmt, int width, int height);\n\n/**\n * Setup the data pointers and linesizes based on the specified image\n * parameters and the provided array.\n *\n * The fields of the given image are filled in by using the src\n * address which points to the image data buffer. Depending on the\n * specified pixel format, one or multiple image data pointers and\n * line sizes will be set.  If a planar format is specified, several\n * pointers will be set pointing to the different picture planes and\n * the line sizes of the different planes will be stored in the\n * lines_sizes array. 
Call with src == NULL to get the required\n * size for the src buffer.\n *\n * To allocate the buffer and fill in the dst_data and dst_linesize in\n * one call, use av_image_alloc().\n *\n * @param dst_data      data pointers to be filled in\n * @param dst_linesizes linesizes for the image in dst_data to be filled in\n * @param src           buffer which will contain or contains the actual image data, can be NULL\n * @param pix_fmt       the pixel format of the image\n * @param width         the width of the image in pixels\n * @param height        the height of the image in pixels\n * @param align         the value used in src for linesize alignment\n * @return the size in bytes required for src, a negative error code\n * in case of failure\n */\nint av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4],\n                         const uint8_t *src,\n                         enum AVPixelFormat pix_fmt, int width, int height, int align);\n\n/**\n * Return the size in bytes of the amount of data required to store an\n * image with the given parameters.\n *\n * @param[in] align the assumed linesize alignment\n */\nint av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align);\n\n/**\n * Copy image data from an image into a buffer.\n *\n * av_image_get_buffer_size() can be used to compute the required size\n * for the buffer to fill.\n *\n * @param dst           a buffer into which picture data will be copied\n * @param dst_size      the size in bytes of dst\n * @param src_data      pointers containing the source image data\n * @param src_linesizes linesizes for the image in src_data\n * @param pix_fmt       the pixel format of the source image\n * @param width         the width of the source image in pixels\n * @param height        the height of the source image in pixels\n * @param align         the assumed linesize alignment for dst\n * @return the number of bytes written to dst, or a negative value\n * (error code) on error\n 
*/\nint av_image_copy_to_buffer(uint8_t *dst, int dst_size,\n                            const uint8_t * const src_data[4], const int src_linesize[4],\n                            enum AVPixelFormat pix_fmt, int width, int height, int align);\n\n/**\n * Check if the given dimension of an image is valid, meaning that all\n * bytes of the image can be addressed with a signed int.\n *\n * @param w the width of the picture\n * @param h the height of the picture\n * @param log_offset the offset to sum to the log level for logging with log_ctx\n * @param log_ctx the parent logging context, it may be NULL\n * @return >= 0 if valid, a negative error code otherwise\n */\nint av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx);\n\n/**\n * Check if the given sample aspect ratio of an image is valid.\n *\n * It is considered invalid if the denominator is 0 or if applying the ratio\n * to the image size would make the smaller dimension less than 1. If the\n * sar numerator is 0, it is considered unknown and will return as valid.\n *\n * @param w width of the image\n * @param h height of the image\n * @param sar sample aspect ratio of the image\n * @return 0 if valid, a negative AVERROR code otherwise\n */\nint av_image_check_sar(unsigned int w, unsigned int h, AVRational sar);\n\n/**\n * @}\n */\n\n\n#endif /* AVUTIL_IMGUTILS_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/intfloat.h",
    "content": "/*\n * Copyright (c) 2011 Mans Rullgard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_INTFLOAT_H\n#define AVUTIL_INTFLOAT_H\n\n#include <stdint.h>\n#include \"attributes.h\"\n\nunion av_intfloat32 {\n    uint32_t i;\n    float    f;\n};\n\nunion av_intfloat64 {\n    uint64_t i;\n    double   f;\n};\n\n/**\n * Reinterpret a 32-bit integer as a float.\n */\nstatic av_always_inline float av_int2float(uint32_t i)\n{\n    union av_intfloat32 v;\n    v.i = i;\n    return v.f;\n}\n\n/**\n * Reinterpret a float as a 32-bit integer.\n */\nstatic av_always_inline uint32_t av_float2int(float f)\n{\n    union av_intfloat32 v;\n    v.f = f;\n    return v.i;\n}\n\n/**\n * Reinterpret a 64-bit integer as a double.\n */\nstatic av_always_inline double av_int2double(uint64_t i)\n{\n    union av_intfloat64 v;\n    v.i = i;\n    return v.f;\n}\n\n/**\n * Reinterpret a double as a 64-bit integer.\n */\nstatic av_always_inline uint64_t av_double2int(double f)\n{\n    union av_intfloat64 v;\n    v.f = f;\n    return v.i;\n}\n\n#endif /* AVUTIL_INTFLOAT_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/intreadwrite.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_INTREADWRITE_H\n#define AVUTIL_INTREADWRITE_H\n\n#include <stdint.h>\n#include \"libavutil/avconfig.h\"\n#include \"attributes.h\"\n#include \"bswap.h\"\n\ntypedef union {\n    uint64_t u64;\n    uint32_t u32[2];\n    uint16_t u16[4];\n    uint8_t  u8 [8];\n    double   f64;\n    float    f32[2];\n} av_alias av_alias64;\n\ntypedef union {\n    uint32_t u32;\n    uint16_t u16[2];\n    uint8_t  u8 [4];\n    float    f32;\n} av_alias av_alias32;\n\ntypedef union {\n    uint16_t u16;\n    uint8_t  u8 [2];\n} av_alias av_alias16;\n\n/*\n * Arch-specific headers can provide any combination of\n * AV_[RW][BLN](16|24|32|48|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.\n * Preprocessor symbols must be defined, even if these are implemented\n * as inline functions.\n *\n * R/W means read/write, B/L/N means big/little/native endianness.\n * The following macros require aligned access, compared to their\n * unaligned variants: AV_(COPY|SWAP|ZERO)(64|128), AV_[RW]N[8-64]A.\n * Incorrect usage may range from abysmal performance to crash\n * depending on the platform.\n *\n * The unaligned variants are AV_[RW][BLN][8-64] and AV_COPY*U.\n */\n\n#ifdef HAVE_AV_CONFIG_H\n\n#include 
\"config.h\"\n\n#if   ARCH_ARM\n#   include \"arm/intreadwrite.h\"\n#elif ARCH_AVR32\n#   include \"avr32/intreadwrite.h\"\n#elif ARCH_MIPS\n#   include \"mips/intreadwrite.h\"\n#elif ARCH_PPC\n#   include \"ppc/intreadwrite.h\"\n#elif ARCH_TOMI\n#   include \"tomi/intreadwrite.h\"\n#elif ARCH_X86\n#   include \"x86/intreadwrite.h\"\n#endif\n\n#endif /* HAVE_AV_CONFIG_H */\n\n/*\n * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.\n */\n\n#if AV_HAVE_BIGENDIAN\n\n#   if    defined(AV_RN16) && !defined(AV_RB16)\n#       define AV_RB16(p) AV_RN16(p)\n#   elif !defined(AV_RN16) &&  defined(AV_RB16)\n#       define AV_RN16(p) AV_RB16(p)\n#   endif\n\n#   if    defined(AV_WN16) && !defined(AV_WB16)\n#       define AV_WB16(p, v) AV_WN16(p, v)\n#   elif !defined(AV_WN16) &&  defined(AV_WB16)\n#       define AV_WN16(p, v) AV_WB16(p, v)\n#   endif\n\n#   if    defined(AV_RN24) && !defined(AV_RB24)\n#       define AV_RB24(p) AV_RN24(p)\n#   elif !defined(AV_RN24) &&  defined(AV_RB24)\n#       define AV_RN24(p) AV_RB24(p)\n#   endif\n\n#   if    defined(AV_WN24) && !defined(AV_WB24)\n#       define AV_WB24(p, v) AV_WN24(p, v)\n#   elif !defined(AV_WN24) &&  defined(AV_WB24)\n#       define AV_WN24(p, v) AV_WB24(p, v)\n#   endif\n\n#   if    defined(AV_RN32) && !defined(AV_RB32)\n#       define AV_RB32(p) AV_RN32(p)\n#   elif !defined(AV_RN32) &&  defined(AV_RB32)\n#       define AV_RN32(p) AV_RB32(p)\n#   endif\n\n#   if    defined(AV_WN32) && !defined(AV_WB32)\n#       define AV_WB32(p, v) AV_WN32(p, v)\n#   elif !defined(AV_WN32) &&  defined(AV_WB32)\n#       define AV_WN32(p, v) AV_WB32(p, v)\n#   endif\n\n#   if    defined(AV_RN48) && !defined(AV_RB48)\n#       define AV_RB48(p) AV_RN48(p)\n#   elif !defined(AV_RN48) &&  defined(AV_RB48)\n#       define AV_RN48(p) AV_RB48(p)\n#   endif\n\n#   if    defined(AV_WN48) && !defined(AV_WB48)\n#       define AV_WB48(p, v) AV_WN48(p, v)\n#   elif !defined(AV_WN48) &&  defined(AV_WB48)\n#       define 
AV_WN48(p, v) AV_WB48(p, v)\n#   endif\n\n#   if    defined(AV_RN64) && !defined(AV_RB64)\n#       define AV_RB64(p) AV_RN64(p)\n#   elif !defined(AV_RN64) &&  defined(AV_RB64)\n#       define AV_RN64(p) AV_RB64(p)\n#   endif\n\n#   if    defined(AV_WN64) && !defined(AV_WB64)\n#       define AV_WB64(p, v) AV_WN64(p, v)\n#   elif !defined(AV_WN64) &&  defined(AV_WB64)\n#       define AV_WN64(p, v) AV_WB64(p, v)\n#   endif\n\n#else /* AV_HAVE_BIGENDIAN */\n\n#   if    defined(AV_RN16) && !defined(AV_RL16)\n#       define AV_RL16(p) AV_RN16(p)\n#   elif !defined(AV_RN16) &&  defined(AV_RL16)\n#       define AV_RN16(p) AV_RL16(p)\n#   endif\n\n#   if    defined(AV_WN16) && !defined(AV_WL16)\n#       define AV_WL16(p, v) AV_WN16(p, v)\n#   elif !defined(AV_WN16) &&  defined(AV_WL16)\n#       define AV_WN16(p, v) AV_WL16(p, v)\n#   endif\n\n#   if    defined(AV_RN24) && !defined(AV_RL24)\n#       define AV_RL24(p) AV_RN24(p)\n#   elif !defined(AV_RN24) &&  defined(AV_RL24)\n#       define AV_RN24(p) AV_RL24(p)\n#   endif\n\n#   if    defined(AV_WN24) && !defined(AV_WL24)\n#       define AV_WL24(p, v) AV_WN24(p, v)\n#   elif !defined(AV_WN24) &&  defined(AV_WL24)\n#       define AV_WN24(p, v) AV_WL24(p, v)\n#   endif\n\n#   if    defined(AV_RN32) && !defined(AV_RL32)\n#       define AV_RL32(p) AV_RN32(p)\n#   elif !defined(AV_RN32) &&  defined(AV_RL32)\n#       define AV_RN32(p) AV_RL32(p)\n#   endif\n\n#   if    defined(AV_WN32) && !defined(AV_WL32)\n#       define AV_WL32(p, v) AV_WN32(p, v)\n#   elif !defined(AV_WN32) &&  defined(AV_WL32)\n#       define AV_WN32(p, v) AV_WL32(p, v)\n#   endif\n\n#   if    defined(AV_RN48) && !defined(AV_RL48)\n#       define AV_RL48(p) AV_RN48(p)\n#   elif !defined(AV_RN48) &&  defined(AV_RL48)\n#       define AV_RN48(p) AV_RL48(p)\n#   endif\n\n#   if    defined(AV_WN48) && !defined(AV_WL48)\n#       define AV_WL48(p, v) AV_WN48(p, v)\n#   elif !defined(AV_WN48) &&  defined(AV_WL48)\n#       define AV_WN48(p, v) AV_WL48(p, v)\n#   
endif\n\n#   if    defined(AV_RN64) && !defined(AV_RL64)\n#       define AV_RL64(p) AV_RN64(p)\n#   elif !defined(AV_RN64) &&  defined(AV_RL64)\n#       define AV_RN64(p) AV_RL64(p)\n#   endif\n\n#   if    defined(AV_WN64) && !defined(AV_WL64)\n#       define AV_WL64(p, v) AV_WN64(p, v)\n#   elif !defined(AV_WN64) &&  defined(AV_WL64)\n#       define AV_WN64(p, v) AV_WL64(p, v)\n#   endif\n\n#endif /* !AV_HAVE_BIGENDIAN */\n\n/*\n * Define AV_[RW]N helper macros to simplify definitions not provided\n * by per-arch headers.\n */\n\n#if defined(__GNUC__) && !defined(__TI_COMPILER_VERSION__)\n\nunion unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias;\nunion unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias;\nunion unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;\n\n#   define AV_RN(s, p) (((const union unaligned_##s *) (p))->l)\n#   define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v))\n\n#elif defined(__DECC)\n\n#   define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))\n#   define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))\n\n#elif AV_HAVE_FAST_UNALIGNED\n\n#   define AV_RN(s, p) (((const av_alias##s*)(p))->u##s)\n#   define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v))\n\n#else\n\n#ifndef AV_RB16\n#   define AV_RB16(x)                           \\\n    ((((const uint8_t*)(x))[0] << 8) |          \\\n      ((const uint8_t*)(x))[1])\n#endif\n#ifndef AV_WB16\n#   define AV_WB16(p, darg) do {                \\\n        unsigned d = (darg);                    \\\n        ((uint8_t*)(p))[1] = (d);               \\\n        ((uint8_t*)(p))[0] = (d)>>8;            \\\n    } while(0)\n#endif\n\n#ifndef AV_RL16\n#   define AV_RL16(x)                           \\\n    ((((const uint8_t*)(x))[1] << 8) |          \\\n      ((const uint8_t*)(x))[0])\n#endif\n#ifndef AV_WL16\n#   define AV_WL16(p, darg) do {                \\\n        unsigned d = (darg);                    \\\n        ((uint8_t*)(p))[0] = (d); 
              \\\n        ((uint8_t*)(p))[1] = (d)>>8;            \\\n    } while(0)\n#endif\n\n#ifndef AV_RB32\n#   define AV_RB32(x)                                \\\n    (((uint32_t)((const uint8_t*)(x))[0] << 24) |    \\\n               (((const uint8_t*)(x))[1] << 16) |    \\\n               (((const uint8_t*)(x))[2] <<  8) |    \\\n                ((const uint8_t*)(x))[3])\n#endif\n#ifndef AV_WB32\n#   define AV_WB32(p, darg) do {                \\\n        unsigned d = (darg);                    \\\n        ((uint8_t*)(p))[3] = (d);               \\\n        ((uint8_t*)(p))[2] = (d)>>8;            \\\n        ((uint8_t*)(p))[1] = (d)>>16;           \\\n        ((uint8_t*)(p))[0] = (d)>>24;           \\\n    } while(0)\n#endif\n\n#ifndef AV_RL32\n#   define AV_RL32(x)                                \\\n    (((uint32_t)((const uint8_t*)(x))[3] << 24) |    \\\n               (((const uint8_t*)(x))[2] << 16) |    \\\n               (((const uint8_t*)(x))[1] <<  8) |    \\\n                ((const uint8_t*)(x))[0])\n#endif\n#ifndef AV_WL32\n#   define AV_WL32(p, darg) do {                \\\n        unsigned d = (darg);                    \\\n        ((uint8_t*)(p))[0] = (d);               \\\n        ((uint8_t*)(p))[1] = (d)>>8;            \\\n        ((uint8_t*)(p))[2] = (d)>>16;           \\\n        ((uint8_t*)(p))[3] = (d)>>24;           \\\n    } while(0)\n#endif\n\n#ifndef AV_RB64\n#   define AV_RB64(x)                                   \\\n    (((uint64_t)((const uint8_t*)(x))[0] << 56) |       \\\n     ((uint64_t)((const uint8_t*)(x))[1] << 48) |       \\\n     ((uint64_t)((const uint8_t*)(x))[2] << 40) |       \\\n     ((uint64_t)((const uint8_t*)(x))[3] << 32) |       \\\n     ((uint64_t)((const uint8_t*)(x))[4] << 24) |       \\\n     ((uint64_t)((const uint8_t*)(x))[5] << 16) |       \\\n     ((uint64_t)((const uint8_t*)(x))[6] <<  8) |       \\\n      (uint64_t)((const uint8_t*)(x))[7])\n#endif\n#ifndef AV_WB64\n#   define AV_WB64(p, darg) do {     
           \\\n        uint64_t d = (darg);                    \\\n        ((uint8_t*)(p))[7] = (d);               \\\n        ((uint8_t*)(p))[6] = (d)>>8;            \\\n        ((uint8_t*)(p))[5] = (d)>>16;           \\\n        ((uint8_t*)(p))[4] = (d)>>24;           \\\n        ((uint8_t*)(p))[3] = (d)>>32;           \\\n        ((uint8_t*)(p))[2] = (d)>>40;           \\\n        ((uint8_t*)(p))[1] = (d)>>48;           \\\n        ((uint8_t*)(p))[0] = (d)>>56;           \\\n    } while(0)\n#endif\n\n#ifndef AV_RL64\n#   define AV_RL64(x)                                   \\\n    (((uint64_t)((const uint8_t*)(x))[7] << 56) |       \\\n     ((uint64_t)((const uint8_t*)(x))[6] << 48) |       \\\n     ((uint64_t)((const uint8_t*)(x))[5] << 40) |       \\\n     ((uint64_t)((const uint8_t*)(x))[4] << 32) |       \\\n     ((uint64_t)((const uint8_t*)(x))[3] << 24) |       \\\n     ((uint64_t)((const uint8_t*)(x))[2] << 16) |       \\\n     ((uint64_t)((const uint8_t*)(x))[1] <<  8) |       \\\n      (uint64_t)((const uint8_t*)(x))[0])\n#endif\n#ifndef AV_WL64\n#   define AV_WL64(p, darg) do {                \\\n        uint64_t d = (darg);                    \\\n        ((uint8_t*)(p))[0] = (d);               \\\n        ((uint8_t*)(p))[1] = (d)>>8;            \\\n        ((uint8_t*)(p))[2] = (d)>>16;           \\\n        ((uint8_t*)(p))[3] = (d)>>24;           \\\n        ((uint8_t*)(p))[4] = (d)>>32;           \\\n        ((uint8_t*)(p))[5] = (d)>>40;           \\\n        ((uint8_t*)(p))[6] = (d)>>48;           \\\n        ((uint8_t*)(p))[7] = (d)>>56;           \\\n    } while(0)\n#endif\n\n#if AV_HAVE_BIGENDIAN\n#   define AV_RN(s, p)    AV_RB##s(p)\n#   define AV_WN(s, p, v) AV_WB##s(p, v)\n#else\n#   define AV_RN(s, p)    AV_RL##s(p)\n#   define AV_WN(s, p, v) AV_WL##s(p, v)\n#endif\n\n#endif /* HAVE_FAST_UNALIGNED */\n\n#ifndef AV_RN16\n#   define AV_RN16(p) AV_RN(16, p)\n#endif\n\n#ifndef AV_RN32\n#   define AV_RN32(p) AV_RN(32, p)\n#endif\n\n#ifndef 
AV_RN64\n#   define AV_RN64(p) AV_RN(64, p)\n#endif\n\n#ifndef AV_WN16\n#   define AV_WN16(p, v) AV_WN(16, p, v)\n#endif\n\n#ifndef AV_WN32\n#   define AV_WN32(p, v) AV_WN(32, p, v)\n#endif\n\n#ifndef AV_WN64\n#   define AV_WN64(p, v) AV_WN(64, p, v)\n#endif\n\n#if AV_HAVE_BIGENDIAN\n#   define AV_RB(s, p)    AV_RN##s(p)\n#   define AV_WB(s, p, v) AV_WN##s(p, v)\n#   define AV_RL(s, p)    av_bswap##s(AV_RN##s(p))\n#   define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v))\n#else\n#   define AV_RB(s, p)    av_bswap##s(AV_RN##s(p))\n#   define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v))\n#   define AV_RL(s, p)    AV_RN##s(p)\n#   define AV_WL(s, p, v) AV_WN##s(p, v)\n#endif\n\n#define AV_RB8(x)     (((const uint8_t*)(x))[0])\n#define AV_WB8(p, d)  do { ((uint8_t*)(p))[0] = (d); } while(0)\n\n#define AV_RL8(x)     AV_RB8(x)\n#define AV_WL8(p, d)  AV_WB8(p, d)\n\n#ifndef AV_RB16\n#   define AV_RB16(p)    AV_RB(16, p)\n#endif\n#ifndef AV_WB16\n#   define AV_WB16(p, v) AV_WB(16, p, v)\n#endif\n\n#ifndef AV_RL16\n#   define AV_RL16(p)    AV_RL(16, p)\n#endif\n#ifndef AV_WL16\n#   define AV_WL16(p, v) AV_WL(16, p, v)\n#endif\n\n#ifndef AV_RB32\n#   define AV_RB32(p)    AV_RB(32, p)\n#endif\n#ifndef AV_WB32\n#   define AV_WB32(p, v) AV_WB(32, p, v)\n#endif\n\n#ifndef AV_RL32\n#   define AV_RL32(p)    AV_RL(32, p)\n#endif\n#ifndef AV_WL32\n#   define AV_WL32(p, v) AV_WL(32, p, v)\n#endif\n\n#ifndef AV_RB64\n#   define AV_RB64(p)    AV_RB(64, p)\n#endif\n#ifndef AV_WB64\n#   define AV_WB64(p, v) AV_WB(64, p, v)\n#endif\n\n#ifndef AV_RL64\n#   define AV_RL64(p)    AV_RL(64, p)\n#endif\n#ifndef AV_WL64\n#   define AV_WL64(p, v) AV_WL(64, p, v)\n#endif\n\n#ifndef AV_RB24\n#   define AV_RB24(x)                           \\\n    ((((const uint8_t*)(x))[0] << 16) |         \\\n     (((const uint8_t*)(x))[1] <<  8) |         \\\n      ((const uint8_t*)(x))[2])\n#endif\n#ifndef AV_WB24\n#   define AV_WB24(p, d) do {                   \\\n        ((uint8_t*)(p))[2] = (d);               \\\n 
       ((uint8_t*)(p))[1] = (d)>>8;            \\\n        ((uint8_t*)(p))[0] = (d)>>16;           \\\n    } while(0)\n#endif\n\n#ifndef AV_RL24\n#   define AV_RL24(x)                           \\\n    ((((const uint8_t*)(x))[2] << 16) |         \\\n     (((const uint8_t*)(x))[1] <<  8) |         \\\n      ((const uint8_t*)(x))[0])\n#endif\n#ifndef AV_WL24\n#   define AV_WL24(p, d) do {                   \\\n        ((uint8_t*)(p))[0] = (d);               \\\n        ((uint8_t*)(p))[1] = (d)>>8;            \\\n        ((uint8_t*)(p))[2] = (d)>>16;           \\\n    } while(0)\n#endif\n\n#ifndef AV_RB48\n#   define AV_RB48(x)                                     \\\n    (((uint64_t)((const uint8_t*)(x))[0] << 40) |         \\\n     ((uint64_t)((const uint8_t*)(x))[1] << 32) |         \\\n     ((uint64_t)((const uint8_t*)(x))[2] << 24) |         \\\n     ((uint64_t)((const uint8_t*)(x))[3] << 16) |         \\\n     ((uint64_t)((const uint8_t*)(x))[4] <<  8) |         \\\n      (uint64_t)((const uint8_t*)(x))[5])\n#endif\n#ifndef AV_WB48\n#   define AV_WB48(p, darg) do {                \\\n        uint64_t d = (darg);                    \\\n        ((uint8_t*)(p))[5] = (d);               \\\n        ((uint8_t*)(p))[4] = (d)>>8;            \\\n        ((uint8_t*)(p))[3] = (d)>>16;           \\\n        ((uint8_t*)(p))[2] = (d)>>24;           \\\n        ((uint8_t*)(p))[1] = (d)>>32;           \\\n        ((uint8_t*)(p))[0] = (d)>>40;           \\\n    } while(0)\n#endif\n\n#ifndef AV_RL48\n#   define AV_RL48(x)                                     \\\n    (((uint64_t)((const uint8_t*)(x))[5] << 40) |         \\\n     ((uint64_t)((const uint8_t*)(x))[4] << 32) |         \\\n     ((uint64_t)((const uint8_t*)(x))[3] << 24) |         \\\n     ((uint64_t)((const uint8_t*)(x))[2] << 16) |         \\\n     ((uint64_t)((const uint8_t*)(x))[1] <<  8) |         \\\n      (uint64_t)((const uint8_t*)(x))[0])\n#endif\n#ifndef AV_WL48\n#   define AV_WL48(p, darg) do {                
\\\n        uint64_t d = (darg);                    \\\n        ((uint8_t*)(p))[0] = (d);               \\\n        ((uint8_t*)(p))[1] = (d)>>8;            \\\n        ((uint8_t*)(p))[2] = (d)>>16;           \\\n        ((uint8_t*)(p))[3] = (d)>>24;           \\\n        ((uint8_t*)(p))[4] = (d)>>32;           \\\n        ((uint8_t*)(p))[5] = (d)>>40;           \\\n    } while(0)\n#endif\n\n/*\n * The AV_[RW]NA macros access naturally aligned data\n * in a type-safe way.\n */\n\n#define AV_RNA(s, p)    (((const av_alias##s*)(p))->u##s)\n#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v))\n\n#ifndef AV_RN16A\n#   define AV_RN16A(p) AV_RNA(16, p)\n#endif\n\n#ifndef AV_RN32A\n#   define AV_RN32A(p) AV_RNA(32, p)\n#endif\n\n#ifndef AV_RN64A\n#   define AV_RN64A(p) AV_RNA(64, p)\n#endif\n\n#ifndef AV_WN16A\n#   define AV_WN16A(p, v) AV_WNA(16, p, v)\n#endif\n\n#ifndef AV_WN32A\n#   define AV_WN32A(p, v) AV_WNA(32, p, v)\n#endif\n\n#ifndef AV_WN64A\n#   define AV_WN64A(p, v) AV_WNA(64, p, v)\n#endif\n\n/*\n * The AV_COPYxxU macros are suitable for copying data to/from unaligned\n * memory locations.\n */\n\n#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s));\n\n#ifndef AV_COPY16U\n#   define AV_COPY16U(d, s) AV_COPYU(16, d, s)\n#endif\n\n#ifndef AV_COPY32U\n#   define AV_COPY32U(d, s) AV_COPYU(32, d, s)\n#endif\n\n#ifndef AV_COPY64U\n#   define AV_COPY64U(d, s) AV_COPYU(64, d, s)\n#endif\n\n#ifndef AV_COPY128U\n#   define AV_COPY128U(d, s)                                    \\\n    do {                                                        \\\n        AV_COPY64U(d, s);                                       \\\n        AV_COPY64U((char *)(d) + 8, (const char *)(s) + 8);     \\\n    } while(0)\n#endif\n\n/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be\n * naturally aligned. 
They may be implemented using MMX,\n * so emms_c() must be called before using any float code\n * afterwards.\n */\n\n#define AV_COPY(n, d, s) \\\n    (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n)\n\n#ifndef AV_COPY16\n#   define AV_COPY16(d, s) AV_COPY(16, d, s)\n#endif\n\n#ifndef AV_COPY32\n#   define AV_COPY32(d, s) AV_COPY(32, d, s)\n#endif\n\n#ifndef AV_COPY64\n#   define AV_COPY64(d, s) AV_COPY(64, d, s)\n#endif\n\n#ifndef AV_COPY128\n#   define AV_COPY128(d, s)                    \\\n    do {                                       \\\n        AV_COPY64(d, s);                       \\\n        AV_COPY64((char*)(d)+8, (char*)(s)+8); \\\n    } while(0)\n#endif\n\n#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b))\n\n#ifndef AV_SWAP64\n#   define AV_SWAP64(a, b) AV_SWAP(64, a, b)\n#endif\n\n#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0)\n\n#ifndef AV_ZERO16\n#   define AV_ZERO16(d) AV_ZERO(16, d)\n#endif\n\n#ifndef AV_ZERO32\n#   define AV_ZERO32(d) AV_ZERO(32, d)\n#endif\n\n#ifndef AV_ZERO64\n#   define AV_ZERO64(d) AV_ZERO(64, d)\n#endif\n\n#ifndef AV_ZERO128\n#   define AV_ZERO128(d)         \\\n    do {                         \\\n        AV_ZERO64(d);            \\\n        AV_ZERO64((char*)(d)+8); \\\n    } while(0)\n#endif\n\n#endif /* AVUTIL_INTREADWRITE_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/lfg.h",
    "content": "/*\n * Lagged Fibonacci PRNG\n * Copyright (c) 2008 Michael Niedermayer\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_LFG_H\n#define AVUTIL_LFG_H\n\ntypedef struct AVLFG {\n    unsigned int state[64];\n    int index;\n} AVLFG;\n\nvoid av_lfg_init(AVLFG *c, unsigned int seed);\n\n/**\n * Get the next random unsigned 32-bit number using an ALFG.\n *\n * Please also consider a simple LCG like state= state*1664525+1013904223,\n * it may be good enough and faster for your specific use case.\n */\nstatic inline unsigned int av_lfg_get(AVLFG *c){\n    c->state[c->index & 63] = c->state[(c->index-24) & 63] + c->state[(c->index-55) & 63];\n    return c->state[c->index++ & 63];\n}\n\n/**\n * Get the next random unsigned 32-bit number using a MLFG.\n *\n * Please also consider av_lfg_get() above, it is faster.\n */\nstatic inline unsigned int av_mlfg_get(AVLFG *c){\n    unsigned int a= c->state[(c->index-55) & 63];\n    unsigned int b= c->state[(c->index-24) & 63];\n    return c->state[c->index++ & 63] = 2*a*b+a+b;\n}\n\n/**\n * Get the next two numbers generated by a Box-Muller Gaussian\n * generator using the random numbers issued by lfg.\n *\n * @param out array where the two generated numbers are placed\n */\nvoid 
av_bmg_get(AVLFG *lfg, double out[2]);\n\n#endif /* AVUTIL_LFG_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/log.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_LOG_H\n#define AVUTIL_LOG_H\n\n#include <stdarg.h>\n#include \"avutil.h\"\n#include \"attributes.h\"\n\ntypedef enum {\n    AV_CLASS_CATEGORY_NA = 0,\n    AV_CLASS_CATEGORY_INPUT,\n    AV_CLASS_CATEGORY_OUTPUT,\n    AV_CLASS_CATEGORY_MUXER,\n    AV_CLASS_CATEGORY_DEMUXER,\n    AV_CLASS_CATEGORY_ENCODER,\n    AV_CLASS_CATEGORY_DECODER,\n    AV_CLASS_CATEGORY_FILTER,\n    AV_CLASS_CATEGORY_BITSTREAM_FILTER,\n    AV_CLASS_CATEGORY_SWSCALER,\n    AV_CLASS_CATEGORY_SWRESAMPLER,\n    AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT = 40,\n    AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,\n    AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,\n    AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,\n    AV_CLASS_CATEGORY_DEVICE_OUTPUT,\n    AV_CLASS_CATEGORY_DEVICE_INPUT,\n    AV_CLASS_CATEGORY_NB, ///< not part of ABI/API\n}AVClassCategory;\n\n#define AV_IS_INPUT_DEVICE(category) \\\n    (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT) || \\\n     ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT) || \\\n     ((category) == AV_CLASS_CATEGORY_DEVICE_INPUT))\n\n#define AV_IS_OUTPUT_DEVICE(category) \\\n    (((category) == 
AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT) || \\\n     ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT) || \\\n     ((category) == AV_CLASS_CATEGORY_DEVICE_OUTPUT))\n\nstruct AVOptionRanges;\n\n/**\n * Describe the class of an AVClass context structure. That is an\n * arbitrary struct of which the first field is a pointer to an\n * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.).\n */\ntypedef struct AVClass {\n    /**\n     * The name of the class; usually it is the same name as the\n     * context structure type to which the AVClass is associated.\n     */\n    const char* class_name;\n\n    /**\n     * A pointer to a function which returns the name of a context\n     * instance ctx associated with the class.\n     */\n    const char* (*item_name)(void* ctx);\n\n    /**\n     * a pointer to the first option specified in the class if any or NULL\n     *\n     * @see av_set_default_options()\n     */\n    const struct AVOption *option;\n\n    /**\n     * LIBAVUTIL_VERSION with which this structure was created.\n     * This is used to allow fields to be added without requiring major\n     * version bumps everywhere.\n     */\n\n    int version;\n\n    /**\n     * Offset in the structure where log_level_offset is stored.\n     * 0 means there is no such variable\n     */\n    int log_level_offset_offset;\n\n    /**\n     * Offset in the structure where a pointer to the parent context for\n     * logging is stored. 
For example a decoder could pass its AVCodecContext\n     * to eval as such a parent context, which an av_log() implementation\n     * could then leverage to display the parent context.\n     * The offset can be NULL.\n     */\n    int parent_log_context_offset;\n\n    /**\n     * Return next AVOptions-enabled child or NULL\n     */\n    void* (*child_next)(void *obj, void *prev);\n\n    /**\n     * Return an AVClass corresponding to the next potential\n     * AVOptions-enabled child.\n     *\n     * The difference between child_next and this is that\n     * child_next iterates over _already existing_ objects, while\n     * child_class_next iterates over _all possible_ children.\n     */\n    const struct AVClass* (*child_class_next)(const struct AVClass *prev);\n\n    /**\n     * Category used for visualization (like color)\n     * This is only set if the category is equal for all objects using this class.\n     * available since version (51 << 16 | 56 << 8 | 100)\n     */\n    AVClassCategory category;\n\n    /**\n     * Callback to return the category.\n     * available since version (51 << 16 | 59 << 8 | 100)\n     */\n    AVClassCategory (*get_category)(void* ctx);\n\n    /**\n     * Callback to return the supported/allowed ranges.\n     * available since version (52.12)\n     */\n    int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags);\n} AVClass;\n\n/**\n * @addtogroup lavu_log\n *\n * @{\n *\n * @defgroup lavu_log_constants Logging Constants\n *\n * @{\n */\n\n/**\n * Print no output.\n */\n#define AV_LOG_QUIET    -8\n\n/**\n * Something went really wrong and we will crash now.\n */\n#define AV_LOG_PANIC     0\n\n/**\n * Something went wrong and recovery is not possible.\n * For example, no header was found for a format which depends\n * on headers or an illegal combination of parameters is used.\n */\n#define AV_LOG_FATAL     8\n\n/**\n * Something went wrong and cannot losslessly be recovered.\n * However, not all future 
data is affected.\n */\n#define AV_LOG_ERROR    16\n\n/**\n * Something somehow does not look correct. This may or may not\n * lead to problems. An example would be the use of '-vstrict -2'.\n */\n#define AV_LOG_WARNING  24\n\n/**\n * Standard information.\n */\n#define AV_LOG_INFO     32\n\n/**\n * Detailed information.\n */\n#define AV_LOG_VERBOSE  40\n\n/**\n * Stuff which is only useful for libav* developers.\n */\n#define AV_LOG_DEBUG    48\n\n#define AV_LOG_MAX_OFFSET (AV_LOG_DEBUG - AV_LOG_QUIET)\n\n/**\n * @}\n */\n\n/**\n * Sets additional colors for extended debugging sessions.\n * @code\n   av_log(ctx, AV_LOG_DEBUG|AV_LOG_C(134), \"Message in purple\\n\");\n   @endcode\n * Requires 256color terminal support. Use outside of debugging is not\n * recommended.\n */\n#define AV_LOG_C(x) ((x) << 8)\n\n/**\n * Send the specified message to the log if the level is less than or equal\n * to the current av_log_level. By default, all logging messages are sent to\n * stderr. This behavior can be altered by setting a different logging callback\n * function.\n * @see av_log_set_callback\n *\n * @param avcl A pointer to an arbitrary struct of which the first field is a\n *        pointer to an AVClass struct.\n * @param level The importance level of the message expressed using a @ref\n *        lavu_log_constants \"Logging Constant\".\n * @param fmt The format string (printf-compatible) that specifies how\n *        subsequent arguments are converted to output.\n */\nvoid av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4);\n\n\n/**\n * Send the specified message to the log if the level is less than or equal\n * to the current av_log_level. By default, all logging messages are sent to\n * stderr. 
This behavior can be altered by setting a different logging callback\n * function.\n * @see av_log_set_callback\n *\n * @param avcl A pointer to an arbitrary struct of which the first field is a\n *        pointer to an AVClass struct.\n * @param level The importance level of the message expressed using a @ref\n *        lavu_log_constants \"Logging Constant\".\n * @param fmt The format string (printf-compatible) that specifies how\n *        subsequent arguments are converted to output.\n * @param vl The arguments referenced by the format string.\n */\nvoid av_vlog(void *avcl, int level, const char *fmt, va_list vl);\n\n/**\n * Get the current log level\n *\n * @see lavu_log_constants\n *\n * @return Current log level\n */\nint av_log_get_level(void);\n\n/**\n * Set the log level\n *\n * @see lavu_log_constants\n *\n * @param level Logging level\n */\nvoid av_log_set_level(int level);\n\n/**\n * Set the logging callback\n *\n * @note The callback must be thread safe, even if the application does not use\n *       threads itself as some codecs are multithreaded.\n *\n * @see av_log_default_callback\n *\n * @param callback A logging function with a compatible signature.\n */\nvoid av_log_set_callback(void (*callback)(void*, int, const char*, va_list));\n\n/**\n * Default logging callback\n *\n * It prints the message to stderr, optionally colorizing it.\n *\n * @param avcl A pointer to an arbitrary struct of which the first field is a\n *        pointer to an AVClass struct.\n * @param level The importance level of the message expressed using a @ref\n *        lavu_log_constants \"Logging Constant\".\n * @param fmt The format string (printf-compatible) that specifies how\n *        subsequent arguments are converted to output.\n * @param vl The arguments referenced by the format string.\n */\nvoid av_log_default_callback(void *avcl, int level, const char *fmt,\n                             va_list vl);\n\n/**\n * Return the context name\n *\n * @param  ctx The 
AVClass context\n *\n * @return The AVClass class_name\n */\nconst char* av_default_item_name(void* ctx);\nAVClassCategory av_default_get_category(void *ptr);\n\n/**\n * Format a line of log the same way as the default callback.\n * @param line          buffer to receive the formatted line\n * @param line_size     size of the buffer\n * @param print_prefix  used to store whether the prefix must be printed;\n *                      must point to a persistent integer initially set to 1\n */\nvoid av_log_format_line(void *ptr, int level, const char *fmt, va_list vl,\n                        char *line, int line_size, int *print_prefix);\n\n/**\n * av_dlog macros\n * Useful to print debug messages that shouldn't get compiled in normally.\n */\n\n#ifdef DEBUG\n#    define av_dlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__)\n#else\n#    define av_dlog(pctx, ...) do { if (0) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0)\n#endif\n\n/**\n * Skip repeated messages, this requires the user app to use av_log() instead of\n * (f)printf as the 2 would otherwise interfere and lead to\n * \"Last message repeated x times\" messages below (f)printf messages with some\n * bad luck.\n * Also to receive the last, \"last repeated\" line if any, the user app must\n * call av_log(NULL, AV_LOG_QUIET, \"%s\", \"\"); at the end\n */\n#define AV_LOG_SKIP_REPEATED 1\n\n/**\n * Include the log severity in messages originating from codecs.\n *\n * Results in messages such as:\n * [rawvideo @ 0xDEADBEEF] [error] encode did not produce valid pts\n */\n#define AV_LOG_PRINT_LEVEL 2\n\nvoid av_log_set_flags(int arg);\nint av_log_get_flags(void);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_LOG_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/lzo.h",
    "content": "/*\n * LZO 1x decompression\n * copyright (c) 2006 Reimar Doeffinger\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_LZO_H\n#define AVUTIL_LZO_H\n\n/**\n * @defgroup lavu_lzo LZO\n * @ingroup lavu_crypto\n *\n * @{\n */\n\n#include <stdint.h>\n\n/** @name Error flags returned by av_lzo1x_decode\n * @{ */\n/// end of the input buffer reached before decoding finished\n#define AV_LZO_INPUT_DEPLETED  1\n/// decoded data did not fit into output buffer\n#define AV_LZO_OUTPUT_FULL     2\n/// a reference to previously decoded data was wrong\n#define AV_LZO_INVALID_BACKPTR 4\n/// a non-specific error in the compressed bitstream\n#define AV_LZO_ERROR           8\n/** @} */\n\n#define AV_LZO_INPUT_PADDING   8\n#define AV_LZO_OUTPUT_PADDING 12\n\n/**\n * @brief Decodes LZO 1x compressed data.\n * @param out output buffer\n * @param outlen size of output buffer, number of bytes left are returned here\n * @param in input buffer\n * @param inlen size of input buffer, number of bytes left are returned here\n * @return 0 on success, otherwise a combination of the error flags above\n *\n * Make sure all buffers are appropriately padded, in must provide\n * AV_LZO_INPUT_PADDING, out must provide AV_LZO_OUTPUT_PADDING additional 
bytes.\n */\nint av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_LZO_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/macros.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * @ingroup lavu\n * Utility Preprocessor macros\n */\n\n#ifndef AVUTIL_MACROS_H\n#define AVUTIL_MACROS_H\n\n/**\n * @addtogroup preproc_misc Preprocessor String Macros\n *\n * String manipulation macros\n *\n * @{\n */\n\n#define AV_STRINGIFY(s)         AV_TOSTRING(s)\n#define AV_TOSTRING(s) #s\n\n#define AV_GLUE(a, b) a ## b\n#define AV_JOIN(a, b) AV_GLUE(a, b)\n\n/**\n * @}\n */\n\n#define AV_PRAGMA(s) _Pragma(#s)\n\n#endif /* AVUTIL_MACROS_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/mathematics.h",
    "content": "/*\n * copyright (c) 2005-2012 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_MATHEMATICS_H\n#define AVUTIL_MATHEMATICS_H\n\n#include <stdint.h>\n#include <math.h>\n#include \"attributes.h\"\n#include \"rational.h\"\n#include \"intfloat.h\"\n\n#ifndef M_E\n#define M_E            2.7182818284590452354   /* e */\n#endif\n#ifndef M_LN2\n#define M_LN2          0.69314718055994530942  /* log_e 2 */\n#endif\n#ifndef M_LN10\n#define M_LN10         2.30258509299404568402  /* log_e 10 */\n#endif\n#ifndef M_LOG2_10\n#define M_LOG2_10      3.32192809488736234787  /* log_2 10 */\n#endif\n#ifndef M_PHI\n#define M_PHI          1.61803398874989484820   /* phi / golden ratio */\n#endif\n#ifndef M_PI\n#define M_PI           3.14159265358979323846  /* pi */\n#endif\n#ifndef M_PI_2\n#define M_PI_2         1.57079632679489661923  /* pi/2 */\n#endif\n#ifndef M_SQRT1_2\n#define M_SQRT1_2      0.70710678118654752440  /* 1/sqrt(2) */\n#endif\n#ifndef M_SQRT2\n#define M_SQRT2        1.41421356237309504880  /* sqrt(2) */\n#endif\n#ifndef NAN\n#define NAN            av_int2float(0x7fc00000)\n#endif\n#ifndef INFINITY\n#define INFINITY       av_int2float(0x7f800000)\n#endif\n\n/**\n * @addtogroup 
lavu_math\n * @{\n */\n\n\nenum AVRounding {\n    AV_ROUND_ZERO     = 0, ///< Round toward zero.\n    AV_ROUND_INF      = 1, ///< Round away from zero.\n    AV_ROUND_DOWN     = 2, ///< Round toward -infinity.\n    AV_ROUND_UP       = 3, ///< Round toward +infinity.\n    AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero.\n    AV_ROUND_PASS_MINMAX = 8192, ///< Flag to pass INT64_MIN/MAX through instead of rescaling, this avoids special cases for AV_NOPTS_VALUE\n};\n\n/**\n * Return the greatest common divisor of a and b.\n * If both a and b are 0 or either or both are <0 then behavior is\n * undefined.\n */\nint64_t av_const av_gcd(int64_t a, int64_t b);\n\n/**\n * Rescale a 64-bit integer with rounding to nearest.\n * A simple a*b/c isn't possible as it can overflow.\n */\nint64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const;\n\n/**\n * Rescale a 64-bit integer with specified rounding.\n * A simple a*b/c isn't possible as it can overflow.\n *\n * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is\n *         INT64_MIN or INT64_MAX then a is passed through unchanged.\n */\nint64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding) av_const;\n\n/**\n * Rescale a 64-bit integer by 2 rational numbers.\n */\nint64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const;\n\n/**\n * Rescale a 64-bit integer by 2 rational numbers with specified rounding.\n *\n * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is\n *         INT64_MIN or INT64_MAX then a is passed through unchanged.\n */\nint64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq,\n                         enum AVRounding) av_const;\n\n/**\n * Compare 2 timestamps each in its own timebases.\n * The result of the function is undefined if one of the timestamps\n * is outside the int64_t range when represented in the others timebase.\n * @return -1 if ts_a is before ts_b, 1 if ts_a is after ts_b or 0 if they 
represent the same position\n */\nint av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b);\n\n/**\n * Compare 2 integers modulo mod.\n * That is we compare integers a and b for which only the least\n * significant log2(mod) bits are known.\n *\n * @param mod must be a power of 2\n * @return a negative value if a is smaller than b\n *         a positive value if a is greater than b\n *         0                if a equals          b\n */\nint64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod);\n\n/**\n * Rescale a timestamp while preserving known durations.\n *\n * @param in_ts Input timestamp\n * @param in_tb Input timebase\n * @param fs_tb Duration and *last timebase\n * @param duration duration till the next call\n * @param out_tb Output timebase\n */\nint64_t av_rescale_delta(AVRational in_tb, int64_t in_ts,  AVRational fs_tb, int duration, int64_t *last, AVRational out_tb);\n\n/**\n * Add a value to a timestamp.\n *\n * This function guarantees that when the same value is repeatedly added that\n * no accumulation of rounding errors occurs.\n *\n * @param ts Input timestamp\n * @param ts_tb Input timestamp timebase\n * @param inc value to add to ts\n * @param inc_tb inc timebase\n */\nint64_t av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb, int64_t inc);\n\n\n    /**\n * @}\n */\n\n#endif /* AVUTIL_MATHEMATICS_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/md5.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_MD5_H\n#define AVUTIL_MD5_H\n\n#include <stdint.h>\n\n#include \"attributes.h\"\n#include \"version.h\"\n\n/**\n * @defgroup lavu_md5 MD5\n * @ingroup lavu_crypto\n * @{\n */\n\nextern const int av_md5_size;\n\nstruct AVMD5;\n\n/**\n * Allocate an AVMD5 context.\n */\nstruct AVMD5 *av_md5_alloc(void);\n\n/**\n * Initialize MD5 hashing.\n *\n * @param ctx pointer to the function context (of size av_md5_size)\n */\nvoid av_md5_init(struct AVMD5 *ctx);\n\n/**\n * Update hash value.\n *\n * @param ctx hash function context\n * @param src input data to update hash with\n * @param len input data length\n */\nvoid av_md5_update(struct AVMD5 *ctx, const uint8_t *src, int len);\n\n/**\n * Finish hashing and output digest value.\n *\n * @param ctx hash function context\n * @param dst buffer where output digest value is stored\n */\nvoid av_md5_final(struct AVMD5 *ctx, uint8_t *dst);\n\n/**\n * Hash an array of data.\n *\n * @param dst The output buffer to write the digest into\n * @param src The data to hash\n * @param len The length of the data, in bytes\n */\nvoid av_md5_sum(uint8_t *dst, const uint8_t *src, 
const int len);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_MD5_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/mem.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * memory handling functions\n */\n\n#ifndef AVUTIL_MEM_H\n#define AVUTIL_MEM_H\n\n#include <limits.h>\n#include <stdint.h>\n\n#include \"attributes.h\"\n#include \"error.h\"\n#include \"avutil.h\"\n\n/**\n * @addtogroup lavu_mem\n * @{\n */\n\n\n#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C)\n    #define DECLARE_ALIGNED(n,t,v)      t __attribute__ ((aligned (n))) v\n    #define DECLARE_ASM_CONST(n,t,v)    const t __attribute__ ((aligned (n))) v\n#elif defined(__TI_COMPILER_VERSION__)\n    #define DECLARE_ALIGNED(n,t,v)                      \\\n        AV_PRAGMA(DATA_ALIGN(v,n))                      \\\n        t __attribute__((aligned(n))) v\n    #define DECLARE_ASM_CONST(n,t,v)                    \\\n        AV_PRAGMA(DATA_ALIGN(v,n))                      \\\n        static const t __attribute__((aligned(n))) v\n#elif defined(__GNUC__)\n    #define DECLARE_ALIGNED(n,t,v)      t __attribute__ ((aligned (n))) v\n    #define DECLARE_ASM_CONST(n,t,v)    static const t av_used __attribute__ ((aligned (n))) v\n#elif defined(_MSC_VER)\n    #define DECLARE_ALIGNED(n,t,v)      
__declspec(align(n)) t v\n    #define DECLARE_ASM_CONST(n,t,v)    __declspec(align(n)) static const t v\n#else\n    #define DECLARE_ALIGNED(n,t,v)      t v\n    #define DECLARE_ASM_CONST(n,t,v)    static const t v\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(3,1)\n    #define av_malloc_attrib __attribute__((__malloc__))\n#else\n    #define av_malloc_attrib\n#endif\n\n#if AV_GCC_VERSION_AT_LEAST(4,3)\n    #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))\n#else\n    #define av_alloc_size(...)\n#endif\n\n/**\n * Allocate a block of size bytes with alignment suitable for all\n * memory accesses (including vectors if available on the CPU).\n * @param size Size in bytes for the memory block to be allocated.\n * @return Pointer to the allocated block, NULL if the block cannot\n * be allocated.\n * @see av_mallocz()\n */\nvoid *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1);\n\n/**\n * Allocate a block of size * nmemb bytes with av_malloc().\n * @param nmemb Number of elements\n * @param size Size of the single element\n * @return Pointer to the allocated block, NULL if the block cannot\n * be allocated.\n * @see av_malloc()\n */\nav_alloc_size(1, 2) static inline void *av_malloc_array(size_t nmemb, size_t size)\n{\n    if (!size || nmemb >= INT_MAX / size)\n        return NULL;\n    return av_malloc(nmemb * size);\n}\n\n/**\n * Allocate or reallocate a block of memory.\n * If ptr is NULL and size > 0, allocate a new block. If\n * size is zero, free the memory block pointed to by ptr.\n * @param ptr Pointer to a memory block already allocated with\n * av_realloc() or NULL.\n * @param size Size in bytes of the memory block to be allocated or\n * reallocated.\n * @return Pointer to a newly-reallocated block or NULL if the block\n * cannot be reallocated or the function is used to free the memory block.\n * @warning Pointers originating from the av_malloc() family of functions must\n *          not be passed to av_realloc(). 
The former can be implemented using\n *          memalign() (or other functions), and there is no guarantee that\n *          pointers from such functions can be passed to realloc() at all.\n *          The situation is undefined according to POSIX and may crash with\n *          some libc implementations.\n * @see av_fast_realloc()\n */\nvoid *av_realloc(void *ptr, size_t size) av_alloc_size(2);\n\n/**\n * Allocate or reallocate a block of memory.\n * This function does the same thing as av_realloc, except:\n * - It takes two arguments and checks the result of the multiplication for\n *   integer overflow.\n * - It frees the input block in case of failure, thus avoiding the memory\n *   leak with the classic \"buf = realloc(buf); if (!buf) return -1;\".\n */\nvoid *av_realloc_f(void *ptr, size_t nelem, size_t elsize);\n\n/**\n * Allocate or reallocate a block of memory.\n * If *ptr is NULL and size > 0, allocate a new block. If\n * size is zero, free the memory block pointed to by ptr.\n * @param   ptr Pointer to a pointer to a memory block already allocated\n *          with av_realloc(), or pointer to a pointer to NULL.\n *          The pointer is updated on success, or freed on failure.\n * @param   size Size in bytes for the memory block to be allocated or\n *          reallocated\n * @return  Zero on success, an AVERROR error code on failure.\n * @warning Pointers originating from the av_malloc() family of functions must\n *          not be passed to av_reallocp(). The former can be implemented using\n *          memalign() (or other functions), and there is no guarantee that\n *          pointers from such functions can be passed to realloc() at all.\n *          The situation is undefined according to POSIX and may crash with\n *          some libc implementations.\n */\nint av_reallocp(void *ptr, size_t size);\n\n/**\n * Allocate or reallocate an array.\n * If ptr is NULL and nmemb > 0, allocate a new block. 
If\n * nmemb is zero, free the memory block pointed to by ptr.\n * @param ptr Pointer to a memory block already allocated with\n * av_realloc() or NULL.\n * @param nmemb Number of elements\n * @param size Size of the single element\n * @return Pointer to a newly-reallocated block or NULL if the block\n * cannot be reallocated or the function is used to free the memory block.\n * @warning Pointers originating from the av_malloc() family of functions must\n *          not be passed to av_realloc(). The former can be implemented using\n *          memalign() (or other functions), and there is no guarantee that\n *          pointers from such functions can be passed to realloc() at all.\n *          The situation is undefined according to POSIX and may crash with\n *          some libc implementations.\n */\nav_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size);\n\n/**\n * Allocate or reallocate an array through a pointer to a pointer.\n * If *ptr is NULL and nmemb > 0, allocate a new block. If\n * nmemb is zero, free the memory block pointed to by ptr.\n * @param ptr Pointer to a pointer to a memory block already allocated\n * with av_realloc(), or pointer to a pointer to NULL.\n * The pointer is updated on success, or freed on failure.\n * @param nmemb Number of elements\n * @param size Size of the single element\n * @return Zero on success, an AVERROR error code on failure.\n * @warning Pointers originating from the av_malloc() family of functions must\n *          not be passed to av_realloc(). 
The former can be implemented using\n *          memalign() (or other functions), and there is no guarantee that\n *          pointers from such functions can be passed to realloc() at all.\n *          The situation is undefined according to POSIX and may crash with\n *          some libc implementations.\n */\nav_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size);\n\n/**\n * Free a memory block which has been allocated with av_malloc(z)() or\n * av_realloc().\n * @param ptr Pointer to the memory block which should be freed.\n * @note ptr = NULL is explicitly allowed.\n * @note It is recommended that you use av_freep() instead.\n * @see av_freep()\n */\nvoid av_free(void *ptr);\n\n/**\n * Allocate a block of size bytes with alignment suitable for all\n * memory accesses (including vectors if available on the CPU) and\n * zero all the bytes of the block.\n * @param size Size in bytes for the memory block to be allocated.\n * @return Pointer to the allocated block, NULL if it cannot be allocated.\n * @see av_malloc()\n */\nvoid *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1);\n\n/**\n * Allocate a block of nmemb * size bytes with alignment suitable for all\n * memory accesses (including vectors if available on the CPU) and\n * zero all the bytes of the block.\n * The allocation will fail if nmemb * size is greater than or equal\n * to INT_MAX.\n * @param nmemb\n * @param size\n * @return Pointer to the allocated block, NULL if it cannot be allocated.\n */\nvoid *av_calloc(size_t nmemb, size_t size) av_malloc_attrib;\n\n/**\n * Allocate a block of size * nmemb bytes with av_mallocz().\n * @param nmemb Number of elements\n * @param size Size of the single element\n * @return Pointer to the allocated block, NULL if the block cannot\n * be allocated.\n * @see av_mallocz()\n * @see av_malloc_array()\n */\nav_alloc_size(1, 2) static inline void *av_mallocz_array(size_t nmemb, size_t size)\n{\n    if (!size || nmemb >= INT_MAX / size)\n 
       return NULL;\n    return av_mallocz(nmemb * size);\n}\n\n/**\n * Duplicate the string s.\n * @param s string to be duplicated\n * @return Pointer to a newly-allocated string containing a\n * copy of s or NULL if the string cannot be allocated.\n */\nchar *av_strdup(const char *s) av_malloc_attrib;\n\n/**\n * Duplicate a substring of the string s.\n * @param s string to be duplicated\n * @param len the maximum length of the resulting string (not counting the\n *            terminating byte).\n * @return Pointer to a newly-allocated string containing a\n * copy of s or NULL if the string cannot be allocated.\n */\nchar *av_strndup(const char *s, size_t len) av_malloc_attrib;\n\n/**\n * Duplicate the buffer p.\n * @param p buffer to be duplicated\n * @return Pointer to a newly allocated buffer containing a\n * copy of p or NULL if the buffer cannot be allocated.\n */\nvoid *av_memdup(const void *p, size_t size);\n\n/**\n * Free a memory block which has been allocated with av_malloc(z)() or\n * av_realloc() and set the pointer pointing to it to NULL.\n * @param ptr Pointer to the pointer to the memory block which should\n * be freed.\n * @note passing a pointer to a NULL pointer is safe and leads to no action.\n * @see av_free()\n */\nvoid av_freep(void *ptr);\n\n/**\n * Add an element to a dynamic array.\n *\n * The array to grow is supposed to be an array of pointers to\n * structures, and the element to add must be a pointer to an already\n * allocated structure.\n *\n * The array is reallocated when its size reaches powers of 2.\n * Therefore, the amortized cost of adding an element is constant.\n *\n * In case of success, the pointer to the array is updated in order to\n * point to the new grown array, and the number pointed to by nb_ptr\n * is incremented.\n * In case of failure, the array is freed, *tab_ptr is set to NULL and\n * *nb_ptr is set to 0.\n *\n * @param tab_ptr pointer to the array to grow\n * @param nb_ptr  pointer to the number of elements 
in the array\n * @param elem    element to add\n * @see av_dynarray_add_nofree(), av_dynarray2_add()\n */\nvoid av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem);\n\n/**\n * Add an element to a dynamic array.\n *\n * Function has the same functionality as av_dynarray_add(),\n * but it doesn't free memory on fails. It returns error code\n * instead and leave current buffer untouched.\n *\n * @param tab_ptr pointer to the array to grow\n * @param nb_ptr  pointer to the number of elements in the array\n * @param elem    element to add\n * @return >=0 on success, negative otherwise.\n * @see av_dynarray_add(), av_dynarray2_add()\n */\nint av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem);\n\n/**\n * Add an element of size elem_size to a dynamic array.\n *\n * The array is reallocated when its number of elements reaches powers of 2.\n * Therefore, the amortized cost of adding an element is constant.\n *\n * In case of success, the pointer to the array is updated in order to\n * point to the new grown array, and the number pointed to by nb_ptr\n * is incremented.\n * In case of failure, the array is freed, *tab_ptr is set to NULL and\n * *nb_ptr is set to 0.\n *\n * @param tab_ptr   pointer to the array to grow\n * @param nb_ptr    pointer to the number of elements in the array\n * @param elem_size size in bytes of the elements in the array\n * @param elem_data pointer to the data of the element to add. 
If NULL, the space of\n *                  the new added element is not filled.\n * @return          pointer to the data of the element to copy in the new allocated space.\n *                  If NULL, the new allocated space is left uninitialized.\n * @see av_dynarray_add(), av_dynarray_add_nofree()\n */\nvoid *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size,\n                       const uint8_t *elem_data);\n\n/**\n * Multiply two size_t values checking for overflow.\n * @return  0 if success, AVERROR(EINVAL) if overflow.\n */\nstatic inline int av_size_mult(size_t a, size_t b, size_t *r)\n{\n    size_t t = a * b;\n    /* Hack inspired from glibc: only try the division if nelem and elsize\n     * are both greater than sqrt(SIZE_MAX). */\n    if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b)\n        return AVERROR(EINVAL);\n    *r = t;\n    return 0;\n}\n\n/**\n * Set the maximum size that may be allocated in one block.\n */\nvoid av_max_alloc(size_t max);\n\n/**\n * deliberately overlapping memcpy implementation\n * @param dst destination buffer\n * @param back how many bytes back we start (the initial size of the overlapping window), must be > 0\n * @param cnt number of bytes to copy, must be >= 0\n *\n * cnt > back is valid, this will copy the bytes we just copied,\n * thus creating a repeating pattern with a period length of back.\n */\nvoid av_memcpy_backptr(uint8_t *dst, int back, int cnt);\n\n/**\n * Reallocate the given block if it is not large enough, otherwise do nothing.\n *\n * @see av_realloc\n */\nvoid *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);\n\n/**\n * Allocate a buffer, reusing the given one if large enough.\n *\n * Contrary to av_fast_realloc the current buffer contents might not be\n * preserved and on error the old buffer is freed, thus no special\n * handling to avoid memleaks is necessary.\n *\n * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer 
to new buffer\n * @param size size of the buffer *ptr points to\n * @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and\n *                 *size 0 if an error occurred.\n */\nvoid av_fast_malloc(void *ptr, unsigned int *size, size_t min_size);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_MEM_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/motion_vector.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_MOTION_VECTOR_H\n#define AVUTIL_MOTION_VECTOR_H\n\n#include <stdint.h>\n\ntypedef struct AVMotionVector {\n    /**\n     * Where the current macroblock comes from; negative value when it comes\n     * from the past, positive value when it comes from the future.\n     * XXX: set exact relative ref frame reference instead of a +/- 1 \"direction\".\n     */\n    int32_t source;\n    /**\n     * Width and height of the block.\n     */\n    uint8_t w, h;\n    /**\n     * Absolute source position. Can be outside the frame area.\n     */\n    int16_t src_x, src_y;\n    /**\n     * Absolute destination position. Can be outside the frame area.\n     */\n    int16_t dst_x, dst_y;\n    /**\n     * Extra flag information.\n     * Currently unused.\n     */\n    uint64_t flags;\n} AVMotionVector;\n\n#endif /* AVUTIL_MOTION_VECTOR_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/murmur3.h",
    "content": "/*\n * Copyright (C) 2013 Reimar Döffinger <Reimar.Doeffinger@gmx.de>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_MURMUR3_H\n#define AVUTIL_MURMUR3_H\n\n#include <stdint.h>\n\nstruct AVMurMur3 *av_murmur3_alloc(void);\nvoid av_murmur3_init_seeded(struct AVMurMur3 *c, uint64_t seed);\nvoid av_murmur3_init(struct AVMurMur3 *c);\nvoid av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, int len);\nvoid av_murmur3_final(struct AVMurMur3 *c, uint8_t dst[16]);\n\n#endif /* AVUTIL_MURMUR3_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/old_pix_fmts.h",
    "content": "/*\n * copyright (c) 2006-2012 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_OLD_PIX_FMTS_H\n#define AVUTIL_OLD_PIX_FMTS_H\n\n/*\n * This header exists to prevent new pixel formats from being accidentally added\n * to the deprecated list.\n * Do not include it directly. It will be removed on next major bump\n *\n * Do not add new items to this list. 
Use the AVPixelFormat enum instead.\n */\n    PIX_FMT_NONE = AV_PIX_FMT_NONE,\n    PIX_FMT_YUV420P,   ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)\n    PIX_FMT_YUYV422,   ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr\n    PIX_FMT_RGB24,     ///< packed RGB 8:8:8, 24bpp, RGBRGB...\n    PIX_FMT_BGR24,     ///< packed RGB 8:8:8, 24bpp, BGRBGR...\n    PIX_FMT_YUV422P,   ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)\n    PIX_FMT_YUV444P,   ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)\n    PIX_FMT_YUV410P,   ///< planar YUV 4:1:0,  9bpp, (1 Cr & Cb sample per 4x4 Y samples)\n    PIX_FMT_YUV411P,   ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)\n    PIX_FMT_GRAY8,     ///<        Y        ,  8bpp\n    PIX_FMT_MONOWHITE, ///<        Y        ,  1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb\n    PIX_FMT_MONOBLACK, ///<        Y        ,  1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb\n    PIX_FMT_PAL8,      ///< 8 bit with PIX_FMT_RGB32 palette\n    PIX_FMT_YUVJ420P,  ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range\n    PIX_FMT_YUVJ422P,  ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range\n    PIX_FMT_YUVJ444P,  ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range\n#if FF_API_XVMC\n    PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing\n    PIX_FMT_XVMC_MPEG2_IDCT,\n#endif /* FF_API_XVMC */\n    PIX_FMT_UYVY422,   ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1\n    PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3\n    PIX_FMT_BGR8,      ///< packed RGB 3:3:2,  8bpp, (msb)2B 3G 3R(lsb)\n    PIX_FMT_BGR4,      ///< packed RGB 1:2:1 bitstream,  4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, 
the first pixel in the byte is the one composed by the 4 msb bits\n    PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1,  8bpp, (msb)1B 2G 1R(lsb)\n    PIX_FMT_RGB8,      ///< packed RGB 3:3:2,  8bpp, (msb)2R 3G 3B(lsb)\n    PIX_FMT_RGB4,      ///< packed RGB 1:2:1 bitstream,  4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits\n    PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1,  8bpp, (msb)1R 2G 1B(lsb)\n    PIX_FMT_NV12,      ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)\n    PIX_FMT_NV21,      ///< as above, but U and V bytes are swapped\n\n    PIX_FMT_ARGB,      ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...\n    PIX_FMT_RGBA,      ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...\n    PIX_FMT_ABGR,      ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...\n    PIX_FMT_BGRA,      ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...\n\n    PIX_FMT_GRAY16BE,  ///<        Y        , 16bpp, big-endian\n    PIX_FMT_GRAY16LE,  ///<        Y        , 16bpp, little-endian\n    PIX_FMT_YUV440P,   ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)\n    PIX_FMT_YUVJ440P,  ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range\n    PIX_FMT_YUVA420P,  ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)\n#if FF_API_VDPAU\n    PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n    PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n    PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various 
fields extracted from headers\n    PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n    PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n#endif\n    PIX_FMT_RGB48BE,   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian\n    PIX_FMT_RGB48LE,   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian\n\n    PIX_FMT_RGB565BE,  ///< packed RGB 5:6:5, 16bpp, (msb)   5R 6G 5B(lsb), big-endian\n    PIX_FMT_RGB565LE,  ///< packed RGB 5:6:5, 16bpp, (msb)   5R 6G 5B(lsb), little-endian\n    PIX_FMT_RGB555BE,  ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0\n    PIX_FMT_RGB555LE,  ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0\n\n    PIX_FMT_BGR565BE,  ///< packed BGR 5:6:5, 16bpp, (msb)   5B 6G 5R(lsb), big-endian\n    PIX_FMT_BGR565LE,  ///< packed BGR 5:6:5, 16bpp, (msb)   5B 6G 5R(lsb), little-endian\n    PIX_FMT_BGR555BE,  ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1\n    PIX_FMT_BGR555LE,  ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1\n\n    PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers\n    PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers\n    PIX_FMT_VAAPI_VLD,  ///< HW decoding through VA 
API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n\n    PIX_FMT_YUV420P16LE,  ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    PIX_FMT_YUV420P16BE,  ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    PIX_FMT_YUV422P16LE,  ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    PIX_FMT_YUV422P16BE,  ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    PIX_FMT_YUV444P16LE,  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    PIX_FMT_YUV444P16BE,  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n#if FF_API_VDPAU\n    PIX_FMT_VDPAU_MPEG4,  ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n#endif\n    PIX_FMT_DXVA2_VLD,    ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer\n\n    PIX_FMT_RGB444LE,  ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0\n    PIX_FMT_RGB444BE,  ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0\n    PIX_FMT_BGR444LE,  ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1\n    PIX_FMT_BGR444BE,  ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1\n    PIX_FMT_GRAY8A,    ///< 8bit gray, 8bit alpha\n    PIX_FMT_BGR48BE,   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian\n    PIX_FMT_BGR48LE,   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian\n\n    //the following 10 formats have the disadvantage of 
needing 1 format for each bit depth, thus\n    //If you want to support multiple bit depths, then using PIX_FMT_YUV420P16* with the bpp stored separately\n    //is better\n    PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n    PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n    PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    PIX_FMT_VDA_VLD,    ///< hardware decoding through VDA\n\n#ifdef AV_PIX_FMT_ABI_GIT_MASTER\n    PIX_FMT_RGBA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian\n    PIX_FMT_RGBA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n    PIX_FMT_BGRA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian\n    
PIX_FMT_BGRA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n#endif\n    PIX_FMT_GBRP,      ///< planar GBR 4:4:4 24bpp\n    PIX_FMT_GBRP9BE,   ///< planar GBR 4:4:4 27bpp, big endian\n    PIX_FMT_GBRP9LE,   ///< planar GBR 4:4:4 27bpp, little endian\n    PIX_FMT_GBRP10BE,  ///< planar GBR 4:4:4 30bpp, big endian\n    PIX_FMT_GBRP10LE,  ///< planar GBR 4:4:4 30bpp, little endian\n    PIX_FMT_GBRP16BE,  ///< planar GBR 4:4:4 48bpp, big endian\n    PIX_FMT_GBRP16LE,  ///< planar GBR 4:4:4 48bpp, little endian\n\n#ifndef AV_PIX_FMT_ABI_GIT_MASTER\n    PIX_FMT_RGBA64BE=0x123,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian\n    PIX_FMT_RGBA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n    PIX_FMT_BGRA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian\n    PIX_FMT_BGRA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n#endif\n    PIX_FMT_0RGB=0x123+4,      ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB...\n    PIX_FMT_RGB0,      ///< packed RGB 8:8:8, 32bpp, RGB0RGB0...\n    PIX_FMT_0BGR,      ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR...\n    PIX_FMT_BGR0,      ///< packed BGR 8:8:8, 32bpp, BGR0BGR0...\n    PIX_FMT_YUVA444P,  ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)\n    PIX_FMT_YUVA422P,  ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)\n\n    PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample 
per 2x2 Y samples), big-endian\n    PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n    PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n    PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    PIX_FMT_GBRP12BE,    ///< planar GBR 4:4:4 36bpp, big endian\n    PIX_FMT_GBRP12LE,    ///< planar GBR 4:4:4 36bpp, little endian\n    PIX_FMT_GBRP14BE,    ///< planar GBR 4:4:4 42bpp, big endian\n    PIX_FMT_GBRP14LE,    ///< planar GBR 4:4:4 42bpp, little endian\n\n    PIX_FMT_NB,        ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions\n#endif /* AVUTIL_OLD_PIX_FMTS_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/opt.h",
    "content": "/*\n * AVOptions\n * copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_OPT_H\n#define AVUTIL_OPT_H\n\n/**\n * @file\n * AVOptions\n */\n\n#include \"rational.h\"\n#include \"avutil.h\"\n#include \"dict.h\"\n#include \"log.h\"\n#include \"pixfmt.h\"\n#include \"samplefmt.h\"\n#include \"version.h\"\n\n/**\n * @defgroup avoptions AVOptions\n * @ingroup lavu_data\n * @{\n * AVOptions provide a generic system to declare options on arbitrary structs\n * (\"objects\"). An option can have a help text, a type and a range of possible\n * values. Options may then be enumerated, read and written to.\n *\n * @section avoptions_implement Implementing AVOptions\n * This section describes how to add AVOptions capabilities to a struct.\n *\n * All AVOptions-related information is stored in an AVClass. Therefore\n * the first member of the struct should be a pointer to an AVClass describing it.\n * The option field of the AVClass must be set to a NULL-terminated static array\n * of AVOptions. Each AVOption must have a non-empty name, a type, a default\n * value and for number-type AVOptions also a range of allowed values. 
It must\n * also declare an offset in bytes from the start of the struct, where the field\n * associated with this AVOption is located. Other fields in the AVOption struct\n * should also be set when applicable, but are not required.\n *\n * The following example illustrates an AVOptions-enabled struct:\n * @code\n * typedef struct test_struct {\n *     AVClass *class;\n *     int      int_opt;\n *     char    *str_opt;\n *     uint8_t *bin_opt;\n *     int      bin_len;\n * } test_struct;\n *\n * static const AVOption test_options[] = {\n *   { \"test_int\", \"This is a test option of int type.\", offsetof(test_struct, int_opt),\n *     AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX },\n *   { \"test_str\", \"This is a test option of string type.\", offsetof(test_struct, str_opt),\n *     AV_OPT_TYPE_STRING },\n *   { \"test_bin\", \"This is a test option of binary type.\", offsetof(test_struct, bin_opt),\n *     AV_OPT_TYPE_BINARY },\n *   { NULL },\n * };\n *\n * static const AVClass test_class = {\n *     .class_name = \"test class\",\n *     .item_name  = av_default_item_name,\n *     .option     = test_options,\n *     .version    = LIBAVUTIL_VERSION_INT,\n * };\n * @endcode\n *\n * Next, when allocating your struct, you must ensure that the AVClass pointer\n * is set to the correct value. Then, av_opt_set_defaults() can be called to\n * initialize defaults. 
After that the struct is ready to be used with the\n * AVOptions API.\n *\n * When cleaning up, you may use the av_opt_free() function to automatically\n * free all the allocated string and binary options.\n *\n * Continuing with the above example:\n *\n * @code\n * test_struct *alloc_test_struct(void)\n * {\n *     test_struct *ret = av_malloc(sizeof(*ret));\n *     ret->class = &test_class;\n *     av_opt_set_defaults(ret);\n *     return ret;\n * }\n * void free_test_struct(test_struct **foo)\n * {\n *     av_opt_free(*foo);\n *     av_freep(foo);\n * }\n * @endcode\n *\n * @subsection avoptions_implement_nesting Nesting\n *      It may happen that an AVOptions-enabled struct contains another\n *      AVOptions-enabled struct as a member (e.g. AVCodecContext in\n *      libavcodec exports generic options, while its priv_data field exports\n *      codec-specific options). In such a case, it is possible to set up the\n *      parent struct to export a child's options. To do that, simply\n *      implement AVClass.child_next() and AVClass.child_class_next() in the\n *      parent struct's AVClass.\n *      Assuming that the test_struct from above now also contains a\n *      child_struct field:\n *\n *      @code\n *      typedef struct child_struct {\n *          AVClass *class;\n *          int flags_opt;\n *      } child_struct;\n *      static const AVOption child_opts[] = {\n *          { \"test_flags\", \"This is a test option of flags type.\",\n *            offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX },\n *          { NULL },\n *      };\n *      static const AVClass child_class = {\n *          .class_name = \"child class\",\n *          .item_name  = av_default_item_name,\n *          .option     = child_opts,\n *          .version    = LIBAVUTIL_VERSION_INT,\n *      };\n *\n *      void *child_next(void *obj, void *prev)\n *      {\n *          test_struct *t = obj;\n *          if (!prev && t->child_struct)\n * 
             return t->child_struct;\n *          return NULL;\n *      }\n *      const AVClass *child_class_next(const AVClass *prev)\n *      {\n *          return prev ? NULL : &child_class;\n *      }\n *      @endcode\n *      Putting child_next() and child_class_next() as defined above into\n *      test_class will now make child_struct's options accessible through\n *      test_struct (again, proper setup as described above needs to be done on\n *      child_struct right after it is created).\n *\n *      From the above example it might not be clear why both child_next()\n *      and child_class_next() are needed. The distinction is that child_next()\n *      iterates over actually existing objects, while child_class_next()\n *      iterates over all possible child classes. E.g. if an AVCodecContext\n *      was initialized to use a codec which has private options, then its\n *      child_next() will return AVCodecContext.priv_data and finish\n *      iterating. OTOH child_class_next() on AVCodecContext.av_class will\n *      iterate over all available codecs with private options.\n *\n * @subsection avoptions_implement_named_constants Named constants\n *      It is possible to create named constants for options. 
Simply set the unit\n *      field of the option the constants should apply to a string and\n *      create the constants themselves as options of type AV_OPT_TYPE_CONST\n *      with their unit field set to the same string.\n *      Their default_val field should contain the value of the named\n *      constant.\n *      For example, to add some named constants for the test_flags option\n *      above, put the following into the child_opts array:\n *      @code\n *      { \"test_flags\", \"This is a test option of flags type.\",\n *        offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, \"test_unit\" },\n *      { \"flag1\", \"This is a flag with value 16\", 0, AV_OPT_TYPE_CONST, { .i64 = 16 }, 0, 0, \"test_unit\" },\n *      @endcode\n *\n * @section avoptions_use Using AVOptions\n * This section deals with accessing options in an AVOptions-enabled struct.\n * Such structs in FFmpeg are e.g. AVCodecContext in libavcodec or\n * AVFormatContext in libavformat.\n *\n * @subsection avoptions_use_examine Examining AVOptions\n * The basic functions for examining options are av_opt_next(), which iterates\n * over all options defined for one object, and av_opt_find(), which searches\n * for an option with the given name.\n *\n * The situation is more complicated with nesting. An AVOptions-enabled struct\n * may have AVOptions-enabled children. Passing the AV_OPT_SEARCH_CHILDREN flag\n * to av_opt_find() will make the function search children recursively.\n *\n * For enumerating there are basically two cases. The first is when you want to\n * get all options that may potentially exist on the struct and its children\n * (e.g.  when constructing documentation). In that case you should call\n * av_opt_child_class_next() recursively on the parent struct's AVClass.  The\n * second case is when you have an already initialized struct with all its\n * children and you want to get all options that can be actually written or read\n * from it. 
In that case you should call av_opt_child_next() recursively (and\n * av_opt_next() on each result).\n *\n * @subsection avoptions_use_get_set Reading and writing AVOptions\n * When setting options, you often have a string read directly from the\n * user. In such a case, simply passing it to av_opt_set() is enough. For\n * non-string type options, av_opt_set() will parse the string according to the\n * option type.\n *\n * Similarly av_opt_get() will read any option type and convert it to a string\n * which will be returned. Do not forget that the string is allocated, so you\n * have to free it with av_free().\n *\n * In some cases it may be more convenient to put all options into an\n * AVDictionary and call av_opt_set_dict() on it. A specific case of this\n * are the format/codec open functions in lavf/lavc which take a dictionary\n * filled with option as a parameter. This allows to set some options\n * that cannot be set otherwise, since e.g. the input file format is not known\n * before the file is actually opened.\n */\n\nenum AVOptionType{\n    AV_OPT_TYPE_FLAGS,\n    AV_OPT_TYPE_INT,\n    AV_OPT_TYPE_INT64,\n    AV_OPT_TYPE_DOUBLE,\n    AV_OPT_TYPE_FLOAT,\n    AV_OPT_TYPE_STRING,\n    AV_OPT_TYPE_RATIONAL,\n    AV_OPT_TYPE_BINARY,  ///< offset must point to a pointer immediately followed by an int for the length\n    AV_OPT_TYPE_DICT,\n    AV_OPT_TYPE_CONST = 128,\n    AV_OPT_TYPE_IMAGE_SIZE = MKBETAG('S','I','Z','E'), ///< offset must point to two consecutive integers\n    AV_OPT_TYPE_PIXEL_FMT  = MKBETAG('P','F','M','T'),\n    AV_OPT_TYPE_SAMPLE_FMT = MKBETAG('S','F','M','T'),\n    AV_OPT_TYPE_VIDEO_RATE = MKBETAG('V','R','A','T'), ///< offset must point to AVRational\n    AV_OPT_TYPE_DURATION   = MKBETAG('D','U','R',' '),\n    AV_OPT_TYPE_COLOR      = MKBETAG('C','O','L','R'),\n    AV_OPT_TYPE_CHANNEL_LAYOUT = MKBETAG('C','H','L','A'),\n#if FF_API_OLD_AVOPTIONS\n    FF_OPT_TYPE_FLAGS = 0,\n    FF_OPT_TYPE_INT,\n    FF_OPT_TYPE_INT64,\n    
FF_OPT_TYPE_DOUBLE,\n    FF_OPT_TYPE_FLOAT,\n    FF_OPT_TYPE_STRING,\n    FF_OPT_TYPE_RATIONAL,\n    FF_OPT_TYPE_BINARY,  ///< offset must point to a pointer immediately followed by an int for the length\n    FF_OPT_TYPE_CONST=128,\n#endif\n};\n\n/**\n * AVOption\n */\ntypedef struct AVOption {\n    const char *name;\n\n    /**\n     * short English help text\n     * @todo What about other languages?\n     */\n    const char *help;\n\n    /**\n     * The offset relative to the context structure where the option\n     * value is stored. It should be 0 for named constants.\n     */\n    int offset;\n    enum AVOptionType type;\n\n    /**\n     * the default value for scalar options\n     */\n    union {\n        int64_t i64;\n        double dbl;\n        const char *str;\n        /* TODO those are unused now */\n        AVRational q;\n    } default_val;\n    double min;                 ///< minimum valid value for the option\n    double max;                 ///< maximum valid value for the option\n\n    int flags;\n#define AV_OPT_FLAG_ENCODING_PARAM  1   ///< a generic parameter which can be set by the user for muxing or encoding\n#define AV_OPT_FLAG_DECODING_PARAM  2   ///< a generic parameter which can be set by the user for demuxing or decoding\n#if FF_API_OPT_TYPE_METADATA\n#define AV_OPT_FLAG_METADATA        4   ///< some data extracted or inserted into the file like title, comment, ...\n#endif\n#define AV_OPT_FLAG_AUDIO_PARAM     8\n#define AV_OPT_FLAG_VIDEO_PARAM     16\n#define AV_OPT_FLAG_SUBTITLE_PARAM  32\n/**\n * The option is intended for exporting values to the caller.\n */\n#define AV_OPT_FLAG_EXPORT          64\n/**\n * The option may not be set through the AVOptions API, only read.\n * This flag only makes sense when AV_OPT_FLAG_EXPORT is also set.\n */\n#define AV_OPT_FLAG_READONLY        128\n#define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering\n//FIXME think about enc-audio, ... 
style flags\n\n    /**\n     * The logical unit to which the option belongs. Non-constant\n     * options and corresponding named constants share the same\n     * unit. May be NULL.\n     */\n    const char *unit;\n} AVOption;\n\n/**\n * A single allowed range of values, or a single allowed value.\n */\ntypedef struct AVOptionRange {\n    const char *str;\n    /**\n     * Value range.\n     * For string ranges this represents the min/max length.\n     * For dimensions this represents the min/max pixel count or width/height in multi-component case.\n     */\n    double value_min, value_max;\n    /**\n     * Value's component range.\n     * For string this represents the unicode range for chars, 0-127 limits to ASCII.\n     */\n    double component_min, component_max;\n    /**\n     * Range flag.\n     * If set to 1 the struct encodes a range, if set to 0 a single value.\n     */\n    int is_range;\n} AVOptionRange;\n\n/**\n * List of AVOptionRange structs.\n */\ntypedef struct AVOptionRanges {\n    /**\n     * Array of option ranges.\n     *\n     * Most of option types use just one component.\n     * Following describes multi-component option types:\n     *\n     * AV_OPT_TYPE_IMAGE_SIZE:\n     * component index 0: range of pixel count (width * height).\n     * component index 1: range of width.\n     * component index 2: range of height.\n     *\n     * @note To obtain multi-component version of this structure, user must\n     *       provide AV_OPT_MULTI_COMPONENT_RANGE to av_opt_query_ranges or\n     *       av_opt_query_ranges_default function.\n     *\n     * Multi-component range can be read as in following example:\n     *\n     * @code\n     * int range_index, component_index;\n     * AVOptionRanges *ranges;\n     * AVOptionRange *range[3]; //may require more than 3 in the future.\n     * av_opt_query_ranges(&ranges, obj, key, AV_OPT_MULTI_COMPONENT_RANGE);\n     * for (range_index = 0; range_index < ranges->nb_ranges; range_index++) {\n     *     for 
(component_index = 0; component_index < ranges->nb_components; component_index++)\n     *         range[component_index] = ranges->range[ranges->nb_ranges * component_index + range_index];\n     *     //do something with range here.\n     * }\n     * av_opt_freep_ranges(&ranges);\n     * @endcode\n     */\n    AVOptionRange **range;\n    /**\n     * Number of ranges per component.\n     */\n    int nb_ranges;\n    /**\n     * Number of components.\n     */\n    int nb_components;\n} AVOptionRanges;\n\n\n#if FF_API_OLD_AVOPTIONS\n/**\n * Set the field of obj with the given name to value.\n *\n * @param[in] obj A struct whose first element is a pointer to an\n * AVClass.\n * @param[in] name the name of the field to set\n * @param[in] val The value to set. If the field is not of a string\n * type, then the given string is parsed.\n * SI postfixes and some named scalars are supported.\n * If the field is of a numeric type, it has to be a numeric or named\n * scalar. Behavior with more than one scalar and +- infix operators\n * is undefined.\n * If the field is of a flags type, it has to be a sequence of numeric\n * scalars or named flags separated by '+' or '-'. 
Prefixing a flag\n * with '+' causes it to be set without affecting the other flags;\n * similarly, '-' unsets a flag.\n * @param[out] o_out if non-NULL put here a pointer to the AVOption\n * found\n * @param alloc this parameter is currently ignored\n * @return 0 if the value has been set, or an AVERROR code in case of\n * error:\n * AVERROR_OPTION_NOT_FOUND if no matching option exists\n * AVERROR(ERANGE) if the value is out of range\n * AVERROR(EINVAL) if the value is not valid\n * @deprecated use av_opt_set()\n */\nattribute_deprecated\nint av_set_string3(void *obj, const char *name, const char *val, int alloc, const AVOption **o_out);\n\nattribute_deprecated const AVOption *av_set_double(void *obj, const char *name, double n);\nattribute_deprecated const AVOption *av_set_q(void *obj, const char *name, AVRational n);\nattribute_deprecated const AVOption *av_set_int(void *obj, const char *name, int64_t n);\n\ndouble av_get_double(void *obj, const char *name, const AVOption **o_out);\nAVRational av_get_q(void *obj, const char *name, const AVOption **o_out);\nint64_t av_get_int(void *obj, const char *name, const AVOption **o_out);\nattribute_deprecated const char *av_get_string(void *obj, const char *name, const AVOption **o_out, char *buf, int buf_len);\nattribute_deprecated const AVOption *av_next_option(FF_CONST_AVUTIL55 void *obj, const AVOption *last);\n#endif\n\n/**\n * Show the obj options.\n *\n * @param req_flags requested flags for the options to show. Show only the\n * options for which it is opt->flags & req_flags.\n * @param rej_flags rejected flags for the options to show. 
Show only the\n * options for which it is !(opt->flags & req_flags).\n * @param av_log_obj log context to use for showing the options\n */\nint av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags);\n\n/**\n * Set the values of all AVOption fields to their default values.\n *\n * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass)\n */\nvoid av_opt_set_defaults(void *s);\n\n#if FF_API_OLD_AVOPTIONS\nattribute_deprecated\nvoid av_opt_set_defaults2(void *s, int mask, int flags);\n#endif\n\n/**\n * Parse the key/value pairs list in opts. For each key/value pair\n * found, stores the value in the field in ctx that is named like the\n * key. ctx must be an AVClass context, storing is done using\n * AVOptions.\n *\n * @param opts options string to parse, may be NULL\n * @param key_val_sep a 0-terminated list of characters used to\n * separate key from value\n * @param pairs_sep a 0-terminated list of characters used to separate\n * two pairs from each other\n * @return the number of successfully set key/value pairs, or a negative\n * value corresponding to an AVERROR code in case of error:\n * AVERROR(EINVAL) if opts cannot be parsed,\n * the error code issued by av_opt_set() if a key/value pair\n * cannot be set\n */\nint av_set_options_string(void *ctx, const char *opts,\n                          const char *key_val_sep, const char *pairs_sep);\n\n/**\n * Parse the key-value pairs list in opts. 
For each key=value pair found,\n * set the value of the corresponding option in ctx.\n *\n * @param ctx          the AVClass object to set options on\n * @param opts         the options string, key-value pairs separated by a\n *                     delimiter\n * @param shorthand    a NULL-terminated array of options names for shorthand\n *                     notation: if the first field in opts has no key part,\n *                     the key is taken from the first element of shorthand;\n *                     then again for the second, etc., until either opts is\n *                     finished, shorthand is finished or a named option is\n *                     found; after that, all options must be named\n * @param key_val_sep  a 0-terminated list of characters used to separate\n *                     key from value, for example '='\n * @param pairs_sep    a 0-terminated list of characters used to separate\n *                     two pairs from each other, for example ':' or ','\n * @return  the number of successfully set key=value pairs, or a negative\n *          value corresponding to an AVERROR code in case of error:\n *          AVERROR(EINVAL) if opts cannot be parsed,\n *          the error code issued by av_set_string3() if a key/value pair\n *          cannot be set\n *\n * Options names must use only the following characters: a-z A-Z 0-9 - . 
/ _\n * Separators must use characters distinct from option names and from each\n * other.\n */\nint av_opt_set_from_string(void *ctx, const char *opts,\n                           const char *const *shorthand,\n                           const char *key_val_sep, const char *pairs_sep);\n/**\n * Free all allocated objects in obj.\n */\nvoid av_opt_free(void *obj);\n\n/**\n * Check whether a particular flag is set in a flags field.\n *\n * @param field_name the name of the flag field option\n * @param flag_name the name of the flag to check\n * @return non-zero if the flag is set, zero if the flag isn't set,\n *         isn't of the right type, or the flags field doesn't exist.\n */\nint av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name);\n\n/**\n * Set all the options from a given dictionary on an object.\n *\n * @param obj a struct whose first element is a pointer to AVClass\n * @param options options to process. This dictionary will be freed and replaced\n *                by a new one containing all options not found in obj.\n *                Of course this new dictionary needs to be freed by caller\n *                with av_dict_free().\n *\n * @return 0 on success, a negative AVERROR if some option was found in obj,\n *         but could not be set.\n *\n * @see av_dict_copy()\n */\nint av_opt_set_dict(void *obj, struct AVDictionary **options);\n\n\n/**\n * Set all the options from a given dictionary on an object.\n *\n * @param obj a struct whose first element is a pointer to AVClass\n * @param options options to process. 
This dictionary will be freed and replaced\n *                by a new one containing all options not found in obj.\n *                Of course this new dictionary needs to be freed by caller\n *                with av_dict_free().\n * @param search_flags A combination of AV_OPT_SEARCH_*.\n *\n * @return 0 on success, a negative AVERROR if some option was found in obj,\n *         but could not be set.\n *\n * @see av_dict_copy()\n */\nint av_opt_set_dict2(void *obj, struct AVDictionary **options, int search_flags);\n\n/**\n * Extract a key-value pair from the beginning of a string.\n *\n * @param ropts        pointer to the options string, will be updated to\n *                     point to the rest of the string (one of the pairs_sep\n *                     or the final NUL)\n * @param key_val_sep  a 0-terminated list of characters used to separate\n *                     key from value, for example '='\n * @param pairs_sep    a 0-terminated list of characters used to separate\n *                     two pairs from each other, for example ':' or ','\n * @param flags        flags; see the AV_OPT_FLAG_* values below\n * @param rkey         parsed key; must be freed using av_free()\n * @param rval         parsed value; must be freed using av_free()\n *\n * @return  >=0 for success, or a negative value corresponding to an\n *          AVERROR code in case of error; in particular:\n *          AVERROR(EINVAL) if no key is present\n *\n */\nint av_opt_get_key_value(const char **ropts,\n                         const char *key_val_sep, const char *pairs_sep,\n                         unsigned flags,\n                         char **rkey, char **rval);\n\nenum {\n\n    /**\n     * Accept to parse a value without a key; the key will then be returned\n     * as NULL.\n     */\n    AV_OPT_FLAG_IMPLICIT_KEY = 1,\n};\n\n/**\n * @defgroup opt_eval_funcs Evaluating option strings\n * @{\n * This group of functions can be used to evaluate option strings\n * and get numbers out 
of them. They do the same thing as av_opt_set(),\n * except the result is written into the caller-supplied pointer.\n *\n * @param obj a struct whose first element is a pointer to AVClass.\n * @param o an option for which the string is to be evaluated.\n * @param val string to be evaluated.\n * @param *_out value of the string will be written here.\n *\n * @return 0 on success, a negative number on failure.\n */\nint av_opt_eval_flags (void *obj, const AVOption *o, const char *val, int        *flags_out);\nint av_opt_eval_int   (void *obj, const AVOption *o, const char *val, int        *int_out);\nint av_opt_eval_int64 (void *obj, const AVOption *o, const char *val, int64_t    *int64_out);\nint av_opt_eval_float (void *obj, const AVOption *o, const char *val, float      *float_out);\nint av_opt_eval_double(void *obj, const AVOption *o, const char *val, double     *double_out);\nint av_opt_eval_q     (void *obj, const AVOption *o, const char *val, AVRational *q_out);\n/**\n * @}\n */\n\n#define AV_OPT_SEARCH_CHILDREN   0x0001 /**< Search in possible children of the\n                                             given object first. */\n/**\n *  The obj passed to av_opt_find() is fake -- only a double pointer to AVClass\n *  instead of a required pointer to a struct containing AVClass. This is\n *  useful for searching for options without needing to allocate the corresponding\n *  object.\n */\n#define AV_OPT_SEARCH_FAKE_OBJ   0x0002\n\n/**\n *  Allows av_opt_query_ranges and av_opt_query_ranges_default to return more than\n *  one component for certain option types.\n *  @see AVOptionRanges for details.\n */\n#define AV_OPT_MULTI_COMPONENT_RANGE 0x1000\n\n/**\n * Look for an option in an object. 
Consider only options which\n * have all the specified flags set.\n *\n * @param[in] obj A pointer to a struct whose first element is a\n *                pointer to an AVClass.\n *                Alternatively a double pointer to an AVClass, if\n *                AV_OPT_SEARCH_FAKE_OBJ search flag is set.\n * @param[in] name The name of the option to look for.\n * @param[in] unit When searching for named constants, name of the unit\n *                 it belongs to.\n * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG).\n * @param search_flags A combination of AV_OPT_SEARCH_*.\n *\n * @return A pointer to the option found, or NULL if no option\n *         was found.\n *\n * @note Options found with AV_OPT_SEARCH_CHILDREN flag may not be settable\n * directly with av_opt_set(). Use special calls which take an options\n * AVDictionary (e.g. avformat_open_input()) to set options found with this\n * flag.\n */\nconst AVOption *av_opt_find(void *obj, const char *name, const char *unit,\n                            int opt_flags, int search_flags);\n\n/**\n * Look for an option in an object. Consider only options which\n * have all the specified flags set.\n *\n * @param[in] obj A pointer to a struct whose first element is a\n *                pointer to an AVClass.\n *                Alternatively a double pointer to an AVClass, if\n *                AV_OPT_SEARCH_FAKE_OBJ search flag is set.\n * @param[in] name The name of the option to look for.\n * @param[in] unit When searching for named constants, name of the unit\n *                 it belongs to.\n * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG).\n * @param search_flags A combination of AV_OPT_SEARCH_*.\n * @param[out] target_obj if non-NULL, an object to which the option belongs will be\n * written here. It may be different from obj if AV_OPT_SEARCH_CHILDREN is present\n * in search_flags. 
This parameter is ignored if search_flags contain\n * AV_OPT_SEARCH_FAKE_OBJ.\n *\n * @return A pointer to the option found, or NULL if no option\n *         was found.\n */\nconst AVOption *av_opt_find2(void *obj, const char *name, const char *unit,\n                             int opt_flags, int search_flags, void **target_obj);\n\n/**\n * Iterate over all AVOptions belonging to obj.\n *\n * @param obj an AVOptions-enabled struct or a double pointer to an\n *            AVClass describing it.\n * @param prev result of the previous call to av_opt_next() on this object\n *             or NULL\n * @return next AVOption or NULL\n */\nconst AVOption *av_opt_next(FF_CONST_AVUTIL55 void *obj, const AVOption *prev);\n\n/**\n * Iterate over AVOptions-enabled children of obj.\n *\n * @param prev result of a previous call to this function or NULL\n * @return next AVOptions-enabled child or NULL\n */\nvoid *av_opt_child_next(void *obj, void *prev);\n\n/**\n * Iterate over potential AVOptions-enabled children of parent.\n *\n * @param prev result of a previous call to this function or NULL\n * @return AVClass corresponding to next potential child or NULL\n */\nconst AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *prev);\n\n/**\n * @defgroup opt_set_funcs Option setting functions\n * @{\n * Those functions set the field of obj with the given name to value.\n *\n * @param[in] obj A struct whose first element is a pointer to an AVClass.\n * @param[in] name the name of the field to set\n * @param[in] val The value to set. In case of av_opt_set() if the field is not\n * of a string type, then the given string is parsed.\n * SI postfixes and some named scalars are supported.\n * If the field is of a numeric type, it has to be a numeric or named\n * scalar. Behavior with more than one scalar and +- infix operators\n * is undefined.\n * If the field is of a flags type, it has to be a sequence of numeric\n * scalars or named flags separated by '+' or '-'. 
Prefixing a flag\n * with '+' causes it to be set without affecting the other flags;\n * similarly, '-' unsets a flag.\n * @param search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN\n * is passed here, then the option may be set on a child of obj.\n *\n * @return 0 if the value has been set, or an AVERROR code in case of\n * error:\n * AVERROR_OPTION_NOT_FOUND if no matching option exists\n * AVERROR(ERANGE) if the value is out of range\n * AVERROR(EINVAL) if the value is not valid\n */\nint av_opt_set         (void *obj, const char *name, const char *val, int search_flags);\nint av_opt_set_int     (void *obj, const char *name, int64_t     val, int search_flags);\nint av_opt_set_double  (void *obj, const char *name, double      val, int search_flags);\nint av_opt_set_q       (void *obj, const char *name, AVRational  val, int search_flags);\nint av_opt_set_bin     (void *obj, const char *name, const uint8_t *val, int size, int search_flags);\nint av_opt_set_image_size(void *obj, const char *name, int w, int h, int search_flags);\nint av_opt_set_pixel_fmt (void *obj, const char *name, enum AVPixelFormat fmt, int search_flags);\nint av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags);\nint av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags);\nint av_opt_set_channel_layout(void *obj, const char *name, int64_t ch_layout, int search_flags);\n/**\n * @note Any old dictionary present is discarded and replaced with a copy of the new one. 
The\n * caller still owns val and is responsible for freeing it.\n */\nint av_opt_set_dict_val(void *obj, const char *name, const AVDictionary *val, int search_flags);\n\n/**\n * Set a binary option to an integer list.\n *\n * @param obj    AVClass object to set options on\n * @param name   name of the binary option\n * @param val    pointer to an integer list (must have the correct type with\n *               regard to the contents of the list)\n * @param term   list terminator (usually 0 or -1)\n * @param flags  search flags\n */\n#define av_opt_set_int_list(obj, name, val, term, flags) \\\n    (av_int_list_length(val, term) > INT_MAX / sizeof(*(val)) ? \\\n     AVERROR(EINVAL) : \\\n     av_opt_set_bin(obj, name, (const uint8_t *)(val), \\\n                    av_int_list_length(val, term) * sizeof(*(val)), flags))\n\n/**\n * @}\n */\n\n/**\n * @defgroup opt_get_funcs Option getting functions\n * @{\n * Those functions get a value of the option with the given name from an object.\n *\n * @param[in] obj a struct whose first element is a pointer to an AVClass.\n * @param[in] name name of the option to get.\n * @param[in] search_flags flags passed to av_opt_find2. I.e. 
if AV_OPT_SEARCH_CHILDREN\n * is passed here, then the option may be found in a child of obj.\n * @param[out] out_val value of the option will be written here\n * @return >=0 on success, a negative error code otherwise\n */\n/**\n * @note the returned string will be av_malloc()ed and must be av_free()ed by the caller\n */\nint av_opt_get         (void *obj, const char *name, int search_flags, uint8_t   **out_val);\nint av_opt_get_int     (void *obj, const char *name, int search_flags, int64_t    *out_val);\nint av_opt_get_double  (void *obj, const char *name, int search_flags, double     *out_val);\nint av_opt_get_q       (void *obj, const char *name, int search_flags, AVRational *out_val);\nint av_opt_get_image_size(void *obj, const char *name, int search_flags, int *w_out, int *h_out);\nint av_opt_get_pixel_fmt (void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt);\nint av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt);\nint av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val);\nint av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *ch_layout);\n/**\n * @param[out] out_val The returned dictionary is a copy of the actual value and must\n * be freed with av_dict_free() by the caller\n */\nint av_opt_get_dict_val(void *obj, const char *name, int search_flags, AVDictionary **out_val);\n/**\n * @}\n */\n/**\n * Gets a pointer to the requested field in a struct.\n * This function allows accessing a struct even when its fields are moved or\n * renamed since the application making the access has been compiled,\n *\n * @returns a pointer to the field, it can be cast to the correct type and read\n *          or written to.\n */\nvoid *av_opt_ptr(const AVClass *avclass, void *obj, const char *name);\n\n/**\n * Free an AVOptionRanges struct and set it to NULL.\n */\nvoid av_opt_freep_ranges(AVOptionRanges **ranges);\n\n/**\n * Get a list 
of allowed ranges for the given option.\n *\n * The returned list may depend on other fields in obj like for example profile.\n *\n * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored\n *              AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to an AVClass instead of a full instance\n *              AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, @see AVOptionRanges\n *\n * The result must be freed with av_opt_freep_ranges.\n *\n * @return number of components returned on success, a negative error code otherwise\n */\nint av_opt_query_ranges(AVOptionRanges **, void *obj, const char *key, int flags);\n\n/**\n * Copy options from src object into dest object.\n *\n * Options that require memory allocation (e.g. string or binary) are malloc'ed in dest object.\n * Original memory allocated for such options is freed unless both src and dest options point to the same memory.\n *\n * @param dest Object to copy into\n * @param src  Object to copy from\n * @return 0 on success, negative on error\n */\nint av_opt_copy(void *dest, FF_CONST_AVUTIL55 void *src);\n\n/**\n * Get a default list of allowed ranges for the given option.\n *\n * This list is constructed without using the AVClass.query_ranges() callback\n * and can be used as fallback from within the callback.\n *\n * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored\n *              AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to an AVClass instead of a full instance\n *              AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, @see AVOptionRanges\n *\n * The result must be freed with av_opt_free_ranges.\n *\n * @return number of components returned on success, a negative error code otherwise\n */\nint av_opt_query_ranges_default(AVOptionRanges **, void *obj, const char *key, int flags);\n\n/**\n * Check if 
given option is set to its default value.\n *\n * Options o must belong to the obj. This function must not be called to check child's options state.\n * @see av_opt_is_set_to_default_by_name().\n *\n * @param obj  AVClass object to check option on\n * @param o    option to be checked\n * @return     >0 when option is set to its default,\n *              0 when option is not set its default,\n *             <0 on error\n */\nint av_opt_is_set_to_default(void *obj, const AVOption *o);\n\n/**\n * Check if given option is set to its default value.\n *\n * @param obj          AVClass object to check option on\n * @param name         option name\n * @param search_flags combination of AV_OPT_SEARCH_*\n * @return             >0 when option is set to its default,\n *                     0 when option is not set its default,\n *                     <0 on error\n */\nint av_opt_is_set_to_default_by_name(void *obj, const char *name, int search_flags);\n\n\n#define AV_OPT_SERIALIZE_SKIP_DEFAULTS              0x00000001  ///< Serialize options that are not set to default values only.\n#define AV_OPT_SERIALIZE_OPT_FLAGS_EXACT            0x00000002  ///< Serialize options that exactly match opt_flags only.\n\n/**\n * Serialize object's options.\n *\n * Create a string containing object's serialized options.\n * Such string may be passed back to av_opt_set_from_string() in order to restore option values.\n * A key/value or pairs separator occurring in the serialized value or\n * name string are escaped through the av_escape() function.\n *\n * @param[in]  obj           AVClass object to serialize\n * @param[in]  opt_flags     serialize options with all the specified flags set (AV_OPT_FLAG)\n * @param[in]  flags         combination of AV_OPT_SERIALIZE_* flags\n * @param[out] buffer        Pointer to buffer that will be allocated with string containg serialized options.\n *                           Buffer must be freed by the caller when is no longer needed.\n * @param[in]  
key_val_sep   character used to separate key from value\n * @param[in]  pairs_sep     character used to separate two pairs from each other\n * @return                   >= 0 on success, negative on error\n * @warning Separators can be neither '\\\\' nor '\\0'. They also cannot be the same.\n */\nint av_opt_serialize(void *obj, int opt_flags, int flags, char **buffer,\n                     const char key_val_sep, const char pairs_sep);\n/**\n * @}\n */\n\n#endif /* AVUTIL_OPT_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/parseutils.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_PARSEUTILS_H\n#define AVUTIL_PARSEUTILS_H\n\n#include <time.h>\n\n#include \"rational.h\"\n\n/**\n * @file\n * misc parsing utilities\n */\n\n/**\n * Parse str and store the parsed ratio in q.\n *\n * Note that a ratio with infinite (1/0) or negative value is\n * considered valid, so you should check on the returned value if you\n * want to exclude those values.\n *\n * The undefined value can be expressed using the \"0:0\" string.\n *\n * @param[in,out] q pointer to the AVRational which will contain the ratio\n * @param[in] str the string to parse: it has to be a string in the format\n * num:den, a float number or an expression\n * @param[in] max the maximum allowed numerator and denominator\n * @param[in] log_offset log level offset which is applied to the log\n * level of log_ctx\n * @param[in] log_ctx parent logging context\n * @return >= 0 on success, a negative error code otherwise\n */\nint av_parse_ratio(AVRational *q, const char *str, int max,\n                   int log_offset, void *log_ctx);\n\n#define av_parse_ratio_quiet(rate, str, max) \\\n    av_parse_ratio(rate, str, max, AV_LOG_MAX_OFFSET, NULL)\n\n/**\n * Parse str and put in width_ptr and 
height_ptr the detected values.\n *\n * @param[in,out] width_ptr pointer to the variable which will contain the detected\n * width value\n * @param[in,out] height_ptr pointer to the variable which will contain the detected\n * height value\n * @param[in] str the string to parse: it has to be a string in the format\n * width x height or a valid video size abbreviation.\n * @return >= 0 on success, a negative error code otherwise\n */\nint av_parse_video_size(int *width_ptr, int *height_ptr, const char *str);\n\n/**\n * Parse str and store the detected values in *rate.\n *\n * @param[in,out] rate pointer to the AVRational which will contain the detected\n * frame rate\n * @param[in] str the string to parse: it has to be a string in the format\n * rate_num / rate_den, a float number or a valid video rate abbreviation\n * @return >= 0 on success, a negative error code otherwise\n */\nint av_parse_video_rate(AVRational *rate, const char *str);\n\n/**\n * Put the RGBA values that correspond to color_string in rgba_color.\n *\n * @param color_string a string specifying a color. It can be the name of\n * a color (case insensitive match) or a [0x|#]RRGGBB[AA] sequence,\n * possibly followed by \"@\" and a string representing the alpha\n * component.\n * The alpha component may be a string composed by \"0x\" followed by an\n * hexadecimal number or a decimal number between 0.0 and 1.0, which\n * represents the opacity value (0x00/0.0 means completely transparent,\n * 0xff/1.0 completely opaque).\n * If the alpha component is not specified then 0xff is assumed.\n * The string \"random\" will result in a random color.\n * @param slen length of the initial part of color_string containing the\n * color. 
It can be set to -1 if color_string is a null terminated string\n * containing nothing else than the color.\n * @return >= 0 in case of success, a negative value in case of\n * failure (for example if color_string cannot be parsed).\n */\nint av_parse_color(uint8_t *rgba_color, const char *color_string, int slen,\n                   void *log_ctx);\n\n/**\n * Get the name of a color from the internal table of hard-coded named\n * colors.\n *\n * This function is meant to enumerate the color names recognized by\n * av_parse_color().\n *\n * @param color_idx index of the requested color, starting from 0\n * @param rgbp      if not NULL, will point to a 3-elements array with the color value in RGB\n * @return the color name string or NULL if color_idx is not in the array\n */\nconst char *av_get_known_color_name(int color_idx, const uint8_t **rgb);\n\n/**\n * Parse timestr and return in *time a corresponding number of\n * microseconds.\n *\n * @param timeval puts here the number of microseconds corresponding\n * to the string in timestr. If the string represents a duration, it\n * is the number of microseconds contained in the time interval.  If\n * the string is a date, is the number of microseconds since 1st of\n * January, 1970 up to the time of the parsed date.  
If timestr cannot\n * be successfully parsed, set *time to INT64_MIN.\n\n * @param timestr a string representing a date or a duration.\n * - If a date the syntax is:\n * @code\n * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH:MM:SS[.m...]]]}|{HHMMSS[.m...]]]}}[Z]\n * now\n * @endcode\n * If the value is \"now\" it takes the current time.\n * Time is local time unless Z is appended, in which case it is\n * interpreted as UTC.\n * If the year-month-day part is not specified it takes the current\n * year-month-day.\n * - If a duration the syntax is:\n * @code\n * [-][HH:]MM:SS[.m...]\n * [-]S+[.m...]\n * @endcode\n * @param duration flag which tells how to interpret timestr, if not\n * zero timestr is interpreted as a duration, otherwise as a date\n * @return >= 0 in case of success, a negative value corresponding to an\n * AVERROR code otherwise\n */\nint av_parse_time(int64_t *timeval, const char *timestr, int duration);\n\n/**\n * Parse the input string p according to the format string fmt and\n * store its results in the structure dt.\n * This implementation supports only a subset of the formats supported\n * by the standard strptime().\n *\n * In particular it actually supports the parameters:\n * - %H: the hour as a decimal number, using a 24-hour clock, in the\n * range '00' through '23'\n * - %J: hours as a decimal number, in the range '0' through INT_MAX\n * - %M: the minute as a decimal number, using a 24-hour clock, in the\n * range '00' through '59'\n * - %S: the second as a decimal number, using a 24-hour clock, in the\n * range '00' through '59'\n * - %Y: the year as a decimal number, using the Gregorian calendar\n * - %m: the month as a decimal number, in the range '1' through '12'\n * - %d: the day of the month as a decimal number, in the range '1'\n * through '31'\n * - %%: a literal '%'\n *\n * @return a pointer to the first character not processed in this\n * function call, or NULL in case the function fails to match all of\n * the fmt string and therefore an 
error occurred\n */\nchar *av_small_strptime(const char *p, const char *fmt, struct tm *dt);\n\n/**\n * Attempt to find a specific tag in a URL.\n *\n * syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done.\n * Return 1 if found.\n */\nint av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info);\n\n/**\n * Convert the decomposed UTC time in tm to a time_t value.\n */\ntime_t av_timegm(struct tm *tm);\n\n#endif /* AVUTIL_PARSEUTILS_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/pixdesc.h",
    "content": "/*\n * pixel format descriptor\n * Copyright (c) 2009 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_PIXDESC_H\n#define AVUTIL_PIXDESC_H\n\n#include <inttypes.h>\n\n#include \"attributes.h\"\n#include \"pixfmt.h\"\n\ntypedef struct AVComponentDescriptor {\n    /**\n     * Which of the 4 planes contains the component.\n     */\n    uint16_t plane        : 2;\n\n    /**\n     * Number of elements between 2 horizontally consecutive pixels minus 1.\n     * Elements are bits for bitstream formats, bytes otherwise.\n     */\n    uint16_t step_minus1  : 3;\n\n    /**\n     * Number of elements before the component of the first pixel plus 1.\n     * Elements are bits for bitstream formats, bytes otherwise.\n     */\n    uint16_t offset_plus1 : 3;\n\n    /**\n     * Number of least significant bits that must be shifted away\n     * to get the value.\n     */\n    uint16_t shift        : 3;\n\n    /**\n     * Number of bits in the component minus 1.\n     */\n    uint16_t depth_minus1 : 4;\n} AVComponentDescriptor;\n\n/**\n * Descriptor that unambiguously describes how the bits of a pixel are\n * stored in the up to 4 data planes of an image. 
It also stores the\n * subsampling factors and number of components.\n *\n * @note This is separate of the colorspace (RGB, YCbCr, YPbPr, JPEG-style YUV\n *       and all the YUV variants) AVPixFmtDescriptor just stores how values\n *       are stored not what these values represent.\n */\ntypedef struct AVPixFmtDescriptor {\n    const char *name;\n    uint8_t nb_components;  ///< The number of components each pixel has, (1-4)\n\n    /**\n     * Amount to shift the luma width right to find the chroma width.\n     * For YV12 this is 1 for example.\n     * chroma_width = -((-luma_width) >> log2_chroma_w)\n     * The note above is needed to ensure rounding up.\n     * This value only refers to the chroma components.\n     */\n    uint8_t log2_chroma_w;  ///< chroma_width = -((-luma_width )>>log2_chroma_w)\n\n    /**\n     * Amount to shift the luma height right to find the chroma height.\n     * For YV12 this is 1 for example.\n     * chroma_height= -((-luma_height) >> log2_chroma_h)\n     * The note above is needed to ensure rounding up.\n     * This value only refers to the chroma components.\n     */\n    uint8_t log2_chroma_h;\n    uint8_t flags;\n\n    /**\n     * Parameters that describe how pixels are packed.\n     * If the format has 2 or 4 components, then alpha is last.\n     * If the format has 1 or 2 components, then luma is 0.\n     * If the format has 3 or 4 components:\n     *   if the RGB flag is set then 0 is red, 1 is green and 2 is blue;\n     *   otherwise 0 is luma, 1 is chroma-U and 2 is chroma-V.\n     */\n    AVComponentDescriptor comp[4];\n\n    /**\n     * Alternative comma-separated names.\n     */\n    const char *alias;\n} AVPixFmtDescriptor;\n\n/**\n * Pixel format is big-endian.\n */\n#define AV_PIX_FMT_FLAG_BE           (1 << 0)\n/**\n * Pixel format has a palette in data[1], values are indexes in this palette.\n */\n#define AV_PIX_FMT_FLAG_PAL          (1 << 1)\n/**\n * All values of a component are bit-wise packed end to end.\n 
*/\n#define AV_PIX_FMT_FLAG_BITSTREAM    (1 << 2)\n/**\n * Pixel format is an HW accelerated format.\n */\n#define AV_PIX_FMT_FLAG_HWACCEL      (1 << 3)\n/**\n * At least one pixel component is not in the first data plane.\n */\n#define AV_PIX_FMT_FLAG_PLANAR       (1 << 4)\n/**\n * The pixel format contains RGB-like data (as opposed to YUV/grayscale).\n */\n#define AV_PIX_FMT_FLAG_RGB          (1 << 5)\n\n/**\n * The pixel format is \"pseudo-paletted\". This means that it contains a\n * fixed palette in the 2nd plane but the palette is fixed/constant for each\n * PIX_FMT. This allows interpreting the data as if it was PAL8, which can\n * in some cases be simpler. Or the data can be interpreted purely based on\n * the pixel format without using the palette.\n * An example of a pseudo-paletted format is AV_PIX_FMT_GRAY8\n */\n#define AV_PIX_FMT_FLAG_PSEUDOPAL    (1 << 6)\n\n/**\n * The pixel format has an alpha channel. This is set on all formats that\n * support alpha in some way. The exception is AV_PIX_FMT_PAL8, which can\n * carry alpha as part of the palette. Details are explained in the\n * AVPixelFormat enum, and are also encoded in the corresponding\n * AVPixFmtDescriptor.\n *\n * The alpha is always straight, never pre-multiplied.\n *\n * If a codec or a filter does not support alpha, it should set all alpha to\n * opaque, or use the equivalent pixel formats without alpha component, e.g.\n * AV_PIX_FMT_RGB0 (or AV_PIX_FMT_RGB24 etc.) 
instead of AV_PIX_FMT_RGBA.\n */\n#define AV_PIX_FMT_FLAG_ALPHA        (1 << 7)\n\n#if FF_API_PIX_FMT\n/**\n * @deprecated use the AV_PIX_FMT_FLAG_* flags\n */\n#define PIX_FMT_BE        AV_PIX_FMT_FLAG_BE\n#define PIX_FMT_PAL       AV_PIX_FMT_FLAG_PAL\n#define PIX_FMT_BITSTREAM AV_PIX_FMT_FLAG_BITSTREAM\n#define PIX_FMT_HWACCEL   AV_PIX_FMT_FLAG_HWACCEL\n#define PIX_FMT_PLANAR    AV_PIX_FMT_FLAG_PLANAR\n#define PIX_FMT_RGB       AV_PIX_FMT_FLAG_RGB\n#define PIX_FMT_PSEUDOPAL AV_PIX_FMT_FLAG_PSEUDOPAL\n#define PIX_FMT_ALPHA     AV_PIX_FMT_FLAG_ALPHA\n#endif\n\n#if FF_API_PIX_FMT_DESC\n/**\n * The array of all the pixel format descriptors.\n */\nextern attribute_deprecated const AVPixFmtDescriptor av_pix_fmt_descriptors[];\n#endif\n\n/**\n * Read a line from an image, and write the values of the\n * pixel format component c to dst.\n *\n * @param data the array containing the pointers to the planes of the image\n * @param linesize the array containing the linesizes of the image\n * @param desc the pixel format descriptor for the image\n * @param x the horizontal coordinate of the first pixel to read\n * @param y the vertical coordinate of the first pixel to read\n * @param w the width of the line to read, that is the number of\n * values to write to dst\n * @param read_pal_component if not zero and the format is a paletted\n * format writes the values corresponding to the palette\n * component c in data[1] to dst, rather than the palette indexes in\n * data[0]. 
The behavior is undefined if the format is not paletted.\n */\nvoid av_read_image_line(uint16_t *dst, const uint8_t *data[4],\n                        const int linesize[4], const AVPixFmtDescriptor *desc,\n                        int x, int y, int c, int w, int read_pal_component);\n\n/**\n * Write the values from src to the pixel format component c of an\n * image line.\n *\n * @param src array containing the values to write\n * @param data the array containing the pointers to the planes of the\n * image to write into. It is supposed to be zeroed.\n * @param linesize the array containing the linesizes of the image\n * @param desc the pixel format descriptor for the image\n * @param x the horizontal coordinate of the first pixel to write\n * @param y the vertical coordinate of the first pixel to write\n * @param w the width of the line to write, that is the number of\n * values to write to the image line\n */\nvoid av_write_image_line(const uint16_t *src, uint8_t *data[4],\n                         const int linesize[4], const AVPixFmtDescriptor *desc,\n                         int x, int y, int c, int w);\n\n/**\n * Return the pixel format corresponding to name.\n *\n * If there is no pixel format with name name, then looks for a\n * pixel format with the name corresponding to the native endian\n * format of name.\n * For example in a little-endian system, first looks for \"gray16\",\n * then for \"gray16le\".\n *\n * Finally if no pixel format has been found, returns AV_PIX_FMT_NONE.\n */\nenum AVPixelFormat av_get_pix_fmt(const char *name);\n\n/**\n * Return the short name for a pixel format, NULL in case pix_fmt is\n * unknown.\n *\n * @see av_get_pix_fmt(), av_get_pix_fmt_string()\n */\nconst char *av_get_pix_fmt_name(enum AVPixelFormat pix_fmt);\n\n/**\n * Print in buf the string corresponding to the pixel format with\n * number pix_fmt, or a header if pix_fmt is negative.\n *\n * @param buf the buffer where to write the string\n * @param buf_size the size 
of buf\n * @param pix_fmt the number of the pixel format to print the\n * corresponding info string, or a negative value to print the\n * corresponding header.\n */\nchar *av_get_pix_fmt_string(char *buf, int buf_size,\n                            enum AVPixelFormat pix_fmt);\n\n/**\n * Return the number of bits per pixel used by the pixel format\n * described by pixdesc. Note that this is not the same as the number\n * of bits per sample.\n *\n * The returned number of bits refers to the number of bits actually\n * used for storing the pixel information, that is padding bits are\n * not counted.\n */\nint av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc);\n\n/**\n * Return the number of bits per pixel for the pixel format\n * described by pixdesc, including any padding or unused bits.\n */\nint av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc);\n\n/**\n * @return a pixel format descriptor for provided pixel format or NULL if\n * this pixel format is unknown.\n */\nconst AVPixFmtDescriptor *av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt);\n\n/**\n * Iterate over all pixel format descriptors known to libavutil.\n *\n * @param prev previous descriptor. 
NULL to get the first descriptor.\n *\n * @return next descriptor or NULL after the last descriptor\n */\nconst AVPixFmtDescriptor *av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev);\n\n/**\n * @return an AVPixelFormat id described by desc, or AV_PIX_FMT_NONE if desc\n * is not a valid pointer to a pixel format descriptor.\n */\nenum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc);\n\n/**\n * Utility function to access log2_chroma_w log2_chroma_h from\n * the pixel format AVPixFmtDescriptor.\n *\n * See av_get_chroma_sub_sample() for a function that asserts a\n * valid pixel format instead of returning an error code.\n * Its recommended that you use avcodec_get_chroma_sub_sample unless\n * you do check the return code!\n *\n * @param[in]  pix_fmt the pixel format\n * @param[out] h_shift store log2_chroma_w\n * @param[out] v_shift store log2_chroma_h\n *\n * @return 0 on success, AVERROR(ENOSYS) on invalid or unknown pixel format\n */\nint av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt,\n                                     int *h_shift, int *v_shift);\n\n/**\n * @return number of planes in pix_fmt, a negative AVERROR if pix_fmt is not a\n * valid pixel format.\n */\nint av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt);\n\n/**\n * Utility function to swap the endianness of a pixel format.\n *\n * @param[in]  pix_fmt the pixel format\n *\n * @return pixel format with swapped endianness if it exists,\n * otherwise AV_PIX_FMT_NONE\n */\nenum AVPixelFormat av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt);\n\n#define FF_LOSS_RESOLUTION  0x0001 /**< loss due to resolution change */\n#define FF_LOSS_DEPTH       0x0002 /**< loss due to color depth change */\n#define FF_LOSS_COLORSPACE  0x0004 /**< loss due to color space conversion */\n#define FF_LOSS_ALPHA       0x0008 /**< loss of alpha bits */\n#define FF_LOSS_COLORQUANT  0x0010 /**< loss due to color quantization */\n#define FF_LOSS_CHROMA      0x0020 /**< loss of chroma 
(e.g. RGB to gray conversion) */\n\n/**\n * Compute what kind of losses will occur when converting from one specific\n * pixel format to another.\n * When converting from one pixel format to another, information loss may occur.\n * For example, when converting from RGB24 to GRAY, the color information will\n * be lost. Similarly, other losses occur when converting from some formats to\n * other formats. These losses can involve loss of chroma, but also loss of\n * resolution, loss of color depth, loss due to the color space conversion, loss\n * of the alpha bits or loss due to color quantization.\n * av_get_fix_fmt_loss() informs you about the various types of losses\n * which will occur when converting from one pixel format to another.\n *\n * @param[in] dst_pix_fmt destination pixel format\n * @param[in] src_pix_fmt source pixel format\n * @param[in] has_alpha Whether the source pixel format alpha channel is used.\n * @return Combination of flags informing you what kind of losses will occur\n * (maximum loss for an invalid dst_pix_fmt).\n */\nint av_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt,\n                        enum AVPixelFormat src_pix_fmt,\n                        int has_alpha);\n\n/**\n * Compute what kind of losses will occur when converting from one specific\n * pixel format to another.\n * When converting from one pixel format to another, information loss may occur.\n * For example, when converting from RGB24 to GRAY, the color information will\n * be lost. Similarly, other losses occur when converting from some formats to\n * other formats. 
These losses can involve loss of chroma, but also loss of\n * resolution, loss of color depth, loss due to the color space conversion, loss\n * of the alpha bits or loss due to color quantization.\n * av_get_fix_fmt_loss() informs you about the various types of losses\n * which will occur when converting from one pixel format to another.\n *\n * @param[in] dst_pix_fmt destination pixel format\n * @param[in] src_pix_fmt source pixel format\n * @param[in] has_alpha Whether the source pixel format alpha channel is used.\n * @return Combination of flags informing you what kind of losses will occur\n * (maximum loss for an invalid dst_pix_fmt).\n */\nenum AVPixelFormat av_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,\n                                             enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);\n\n/**\n * @return the name for provided color range or NULL if unknown.\n */\nconst char *av_color_range_name(enum AVColorRange range);\n\n/**\n * @return the name for provided color primaries or NULL if unknown.\n */\nconst char *av_color_primaries_name(enum AVColorPrimaries primaries);\n\n/**\n * @return the name for provided color transfer or NULL if unknown.\n */\nconst char *av_color_transfer_name(enum AVColorTransferCharacteristic transfer);\n\n/**\n * @return the name for provided color space or NULL if unknown.\n */\nconst char *av_color_space_name(enum AVColorSpace space);\n\n/**\n * @return the name for provided chroma location or NULL if unknown.\n */\nconst char *av_chroma_location_name(enum AVChromaLocation location);\n\n#endif /* AVUTIL_PIXDESC_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/pixelutils.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_PIXELUTILS_H\n#define AVUTIL_PIXELUTILS_H\n\n#include <stddef.h>\n#include <stdint.h>\n#include \"common.h\"\n\n/**\n * Sum of abs(src1[x] - src2[x])\n */\ntypedef int (*av_pixelutils_sad_fn)(const uint8_t *src1, ptrdiff_t stride1,\n                                    const uint8_t *src2, ptrdiff_t stride2);\n\n/**\n * Get a potentially optimized pointer to a Sum-of-absolute-differences\n * function (see the av_pixelutils_sad_fn prototype).\n *\n * @param w_bits  1<<w_bits is the requested width of the block size\n * @param h_bits  1<<h_bits is the requested height of the block size\n * @param aligned If set to 2, the returned sad function will assume src1 and\n *                src2 addresses are aligned on the block size.\n *                If set to 1, the returned sad function will assume src1 is\n *                aligned on the block size.\n *                If set to 0, the returned sad function assume no particular\n *                alignment.\n * @param log_ctx context used for logging, can be NULL\n *\n * @return a pointer to the SAD function or NULL in case of error (because of\n *         invalid parameters)\n */\nav_pixelutils_sad_fn 
av_pixelutils_get_sad_fn(int w_bits, int h_bits,\n                                              int aligned, void *log_ctx);\n\n#endif /* AVUTIL_PIXELUTILS_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/pixfmt.h",
    "content": "/*\n * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_PIXFMT_H\n#define AVUTIL_PIXFMT_H\n\n/**\n * @file\n * pixel format definitions\n *\n */\n\n#include \"libavutil/avconfig.h\"\n#include \"version.h\"\n\n#define AVPALETTE_SIZE 1024\n#define AVPALETTE_COUNT 256\n\n/**\n * Pixel format.\n *\n * @note\n * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA\n * color is put together as:\n *  (A << 24) | (R << 16) | (G << 8) | B\n * This is stored as BGRA on little-endian CPU architectures and ARGB on\n * big-endian CPUs.\n *\n * @par\n * When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized\n * image data is stored in AVFrame.data[0]. The palette is transported in\n * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is\n * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is\n * also endian-specific). 
Note also that the individual RGB32 palette\n * components stored in AVFrame.data[1] should be in the range 0..255.\n * This is important as many custom PAL8 video codecs that were designed\n * to run on the IBM VGA graphics adapter use 6-bit palette components.\n *\n * @par\n * For all the 8bit per pixel formats, an RGB32 palette is in data[1] like\n * for pal8. This palette is filled in automatically by the function\n * allocating the picture.\n *\n * @note\n * Make sure that all newly added big-endian formats have (pix_fmt & 1) == 1\n * and that all newly added little-endian formats have (pix_fmt & 1) == 0.\n * This allows simpler detection of big vs little-endian.\n */\nenum AVPixelFormat {\n    AV_PIX_FMT_NONE = -1,\n    AV_PIX_FMT_YUV420P,   ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)\n    AV_PIX_FMT_YUYV422,   ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr\n    AV_PIX_FMT_RGB24,     ///< packed RGB 8:8:8, 24bpp, RGBRGB...\n    AV_PIX_FMT_BGR24,     ///< packed RGB 8:8:8, 24bpp, BGRBGR...\n    AV_PIX_FMT_YUV422P,   ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)\n    AV_PIX_FMT_YUV444P,   ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)\n    AV_PIX_FMT_YUV410P,   ///< planar YUV 4:1:0,  9bpp, (1 Cr & Cb sample per 4x4 Y samples)\n    AV_PIX_FMT_YUV411P,   ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)\n    AV_PIX_FMT_GRAY8,     ///<        Y        ,  8bpp\n    AV_PIX_FMT_MONOWHITE, ///<        Y        ,  1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb\n    AV_PIX_FMT_MONOBLACK, ///<        Y        ,  1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb\n    AV_PIX_FMT_PAL8,      ///< 8 bit with PIX_FMT_RGB32 palette\n    AV_PIX_FMT_YUVJ420P,  ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range\n    AV_PIX_FMT_YUVJ422P,  ///< planar YUV 4:2:2, 16bpp, full 
scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range\n    AV_PIX_FMT_YUVJ444P,  ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range\n#if FF_API_XVMC\n    AV_PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing\n    AV_PIX_FMT_XVMC_MPEG2_IDCT,\n#define AV_PIX_FMT_XVMC AV_PIX_FMT_XVMC_MPEG2_IDCT\n#endif /* FF_API_XVMC */\n    AV_PIX_FMT_UYVY422,   ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1\n    AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3\n    AV_PIX_FMT_BGR8,      ///< packed RGB 3:3:2,  8bpp, (msb)2B 3G 3R(lsb)\n    AV_PIX_FMT_BGR4,      ///< packed RGB 1:2:1 bitstream,  4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits\n    AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1,  8bpp, (msb)1B 2G 1R(lsb)\n    AV_PIX_FMT_RGB8,      ///< packed RGB 3:3:2,  8bpp, (msb)2R 3G 3B(lsb)\n    AV_PIX_FMT_RGB4,      ///< packed RGB 1:2:1 bitstream,  4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits\n    AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1,  8bpp, (msb)1R 2G 1B(lsb)\n    AV_PIX_FMT_NV12,      ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)\n    AV_PIX_FMT_NV21,      ///< as above, but U and V bytes are swapped\n\n    AV_PIX_FMT_ARGB,      ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...\n    AV_PIX_FMT_RGBA,      ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...\n    AV_PIX_FMT_ABGR,      ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...\n    AV_PIX_FMT_BGRA,      ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...\n\n    AV_PIX_FMT_GRAY16BE,  ///<        Y        , 16bpp, big-endian\n    AV_PIX_FMT_GRAY16LE,  ///<        Y        , 16bpp, little-endian\n    AV_PIX_FMT_YUV440P,   ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y 
samples)\n    AV_PIX_FMT_YUVJ440P,  ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range\n    AV_PIX_FMT_YUVA420P,  ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)\n#if FF_API_VDPAU\n    AV_PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n    AV_PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n    AV_PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n    AV_PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n    AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n#endif\n    AV_PIX_FMT_RGB48BE,   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian\n    AV_PIX_FMT_RGB48LE,   ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian\n\n    AV_PIX_FMT_RGB565BE,  ///< packed RGB 5:6:5, 16bpp, (msb)   5R 6G 5B(lsb), big-endian\n    AV_PIX_FMT_RGB565LE,  ///< packed RGB 5:6:5, 16bpp, (msb)   5R 6G 5B(lsb), little-endian\n    AV_PIX_FMT_RGB555BE,  ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian   , X=unused/undefined\n    AV_PIX_FMT_RGB555LE,  ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined\n\n    AV_PIX_FMT_BGR565BE,  ///< 
packed BGR 5:6:5, 16bpp, (msb)   5B 6G 5R(lsb), big-endian\n    AV_PIX_FMT_BGR565LE,  ///< packed BGR 5:6:5, 16bpp, (msb)   5B 6G 5R(lsb), little-endian\n    AV_PIX_FMT_BGR555BE,  ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian   , X=unused/undefined\n    AV_PIX_FMT_BGR555LE,  ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined\n\n    AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers\n    AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers\n    AV_PIX_FMT_VAAPI_VLD,  ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n\n    AV_PIX_FMT_YUV420P16LE,  ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    AV_PIX_FMT_YUV420P16BE,  ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    AV_PIX_FMT_YUV422P16LE,  ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    AV_PIX_FMT_YUV422P16BE,  ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    AV_PIX_FMT_YUV444P16LE,  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    AV_PIX_FMT_YUV444P16BE,  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n#if FF_API_VDPAU\n    AV_PIX_FMT_VDPAU_MPEG4,  ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers\n#endif\n    AV_PIX_FMT_DXVA2_VLD,    ///< HW decoding through DXVA2, Picture.data[3] contains a 
LPDIRECT3DSURFACE9 pointer\n\n    AV_PIX_FMT_RGB444LE,  ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined\n    AV_PIX_FMT_RGB444BE,  ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian,    X=unused/undefined\n    AV_PIX_FMT_BGR444LE,  ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined\n    AV_PIX_FMT_BGR444BE,  ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian,    X=unused/undefined\n    AV_PIX_FMT_YA8,       ///< 8bit gray, 8bit alpha\n\n    AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8\n    AV_PIX_FMT_GRAY8A= AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8\n\n    AV_PIX_FMT_BGR48BE,   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian\n    AV_PIX_FMT_BGR48LE,   ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian\n\n    /**\n     * The following 12 formats have the disadvantage of needing 1 format for each bit depth.\n     * Notice that each 9/10 bits sample is stored in 16 bits with extra padding.\n     * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better.\n     */\n    AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 
27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n    AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n    AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    AV_PIX_FMT_VDA_VLD,    ///< hardware decoding through VDA\n\n#ifdef AV_PIX_FMT_ABI_GIT_MASTER\n    AV_PIX_FMT_RGBA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian\n    AV_PIX_FMT_RGBA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n    AV_PIX_FMT_BGRA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian\n    AV_PIX_FMT_BGRA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n#endif\n    AV_PIX_FMT_GBRP,      ///< planar GBR 4:4:4 24bpp\n    AV_PIX_FMT_GBRP9BE,   ///< planar GBR 4:4:4 27bpp, big-endian\n    AV_PIX_FMT_GBRP9LE,   ///< planar GBR 4:4:4 27bpp, little-endian\n    AV_PIX_FMT_GBRP10BE,  ///< planar GBR 4:4:4 30bpp, big-endian\n    AV_PIX_FMT_GBRP10LE,  ///< planar GBR 4:4:4 30bpp, little-endian\n    AV_PIX_FMT_GBRP16BE,  ///< planar GBR 4:4:4 48bpp, big-endian\n    AV_PIX_FMT_GBRP16LE,  ///< planar GBR 4:4:4 48bpp, little-endian\n\n    /**\n     * duplicated pixel formats for compatibility with libav.\n     * FFmpeg supports these formats since May 8 2012 and Jan 28 2012 (commits f9ca1ac7 and 143a5c55)\n     * Libav added them Oct 12 
2012 with incompatible values (commit 6d5600e85)\n     */\n    AV_PIX_FMT_YUVA422P_LIBAV,  ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)\n    AV_PIX_FMT_YUVA444P_LIBAV,  ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)\n\n    AV_PIX_FMT_YUVA420P9BE,  ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian\n    AV_PIX_FMT_YUVA420P9LE,  ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian\n    AV_PIX_FMT_YUVA422P9BE,  ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian\n    AV_PIX_FMT_YUVA422P9LE,  ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian\n    AV_PIX_FMT_YUVA444P9BE,  ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian\n    AV_PIX_FMT_YUVA444P9LE,  ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian\n    AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)\n    AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)\n    AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)\n    AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)\n    AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)\n    AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)\n    AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)\n    AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)\n    AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)\n    
AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)\n    AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)\n    AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)\n\n    AV_PIX_FMT_VDPAU,     ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface\n\n    AV_PIX_FMT_XYZ12LE,      ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0\n    AV_PIX_FMT_XYZ12BE,      ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0\n    AV_PIX_FMT_NV16,         ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)\n    AV_PIX_FMT_NV20LE,       ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    AV_PIX_FMT_NV20BE,       ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n\n    /**\n     * duplicated pixel formats for compatibility with libav.\n     * FFmpeg supports these formats since Sat Sep 24 06:01:45 2011 +0200 (commits 9569a3c9f41387a8c7d1ce97d8693520477a66c3)\n     * also see Fri Nov 25 01:38:21 2011 +0100 92afb431621c79155fcb7171d26f137eb1bee028\n     * Libav added them Sun Mar 16 23:05:47 2014 +0100 with incompatible values (commit 1481d24c3a0abf81e1d7a514547bd5305232be30)\n     */\n    AV_PIX_FMT_RGBA64BE_LIBAV,     ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian\n    AV_PIX_FMT_RGBA64LE_LIBAV,     ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n    AV_PIX_FMT_BGRA64BE_LIBAV,     ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 
16A, the 2-byte value for each R/G/B/A component is stored as big-endian\n    AV_PIX_FMT_BGRA64LE_LIBAV,     ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n\n    AV_PIX_FMT_YVYU422,   ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb\n\n    AV_PIX_FMT_VDA,          ///< HW acceleration through VDA, data[3] contains a CVPixelBufferRef\n\n    AV_PIX_FMT_YA16BE,       ///< 16bit gray, 16bit alpha (big-endian)\n    AV_PIX_FMT_YA16LE,       ///< 16bit gray, 16bit alpha (little-endian)\n\n    /**\n     * duplicated pixel formats for compatibility with libav.\n     * FFmpeg supports these formats since May 3 2013 (commit e6d4e687558d08187e7a415a7725e4b1a416f782)\n     * Libav added them Jan 14 2015 with incompatible values (commit 0e6c7dfa650e8b0497bfa7a06394b7a462ddc33a)\n     */\n    AV_PIX_FMT_GBRAP_LIBAV,        ///< planar GBRA 4:4:4:4 32bpp\n    AV_PIX_FMT_GBRAP16BE_LIBAV,    ///< planar GBRA 4:4:4:4 64bpp, big-endian\n    AV_PIX_FMT_GBRAP16LE_LIBAV,    ///< planar GBRA 4:4:4:4 64bpp, little-endian\n    /**\n     *  HW acceleration through QSV, data[3] contains a pointer to the\n     *  mfxFrameSurface1 structure.\n     */\n    AV_PIX_FMT_QSV,\n\n#ifndef AV_PIX_FMT_ABI_GIT_MASTER\n    AV_PIX_FMT_RGBA64BE=0x123,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian\n    AV_PIX_FMT_RGBA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n    AV_PIX_FMT_BGRA64BE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian\n    AV_PIX_FMT_BGRA64LE,  ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian\n#endif\n    AV_PIX_FMT_0RGB=0x123+4,///< packed RGB 8:8:8, 32bpp, XRGBXRGB...   
X=unused/undefined\n    AV_PIX_FMT_RGB0,        ///< packed RGB 8:8:8, 32bpp, RGBXRGBX...   X=unused/undefined\n    AV_PIX_FMT_0BGR,        ///< packed BGR 8:8:8, 32bpp, XBGRXBGR...   X=unused/undefined\n    AV_PIX_FMT_BGR0,        ///< packed BGR 8:8:8, 32bpp, BGRXBGRX...   X=unused/undefined\n    AV_PIX_FMT_YUVA444P,  ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)\n    AV_PIX_FMT_YUVA422P,  ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)\n\n    AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian\n    AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian\n    AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian\n    AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian\n    AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n    AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian\n    AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian\n    AV_PIX_FMT_GBRP12BE,    ///< planar GBR 4:4:4 36bpp, big-endian\n    AV_PIX_FMT_GBRP12LE,    ///< planar GBR 4:4:4 36bpp, little-endian\n    AV_PIX_FMT_GBRP14BE,    ///< planar GBR 4:4:4 42bpp, 
big-endian\n    AV_PIX_FMT_GBRP14LE,    ///< planar GBR 4:4:4 42bpp, little-endian\n    AV_PIX_FMT_GBRAP,       ///< planar GBRA 4:4:4:4 32bpp\n    AV_PIX_FMT_GBRAP16BE,   ///< planar GBRA 4:4:4:4 64bpp, big-endian\n    AV_PIX_FMT_GBRAP16LE,   ///< planar GBRA 4:4:4:4 64bpp, little-endian\n    AV_PIX_FMT_YUVJ411P,    ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of PIX_FMT_YUV411P and setting color_range\n\n    AV_PIX_FMT_BAYER_BGGR8,    ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */\n    AV_PIX_FMT_BAYER_RGGB8,    ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */\n    AV_PIX_FMT_BAYER_GBRG8,    ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */\n    AV_PIX_FMT_BAYER_GRBG8,    ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples */\n    AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */\n    AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */\n    AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */\n    AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */\n    AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */\n    AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */\n    AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */\n    AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */\n#if !FF_API_XVMC\n    AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing\n#endif /* !FF_API_XVMC */\n\n    AV_PIX_FMT_NB,        ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* 
because the number of formats might differ between versions\n\n#if FF_API_PIX_FMT\n#include \"old_pix_fmts.h\"\n#endif\n};\n\n#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI\n#define AV_PIX_FMT_YUVA422P AV_PIX_FMT_YUVA422P_LIBAV\n#define AV_PIX_FMT_YUVA444P AV_PIX_FMT_YUVA444P_LIBAV\n#define AV_PIX_FMT_RGBA64BE AV_PIX_FMT_RGBA64BE_LIBAV\n#define AV_PIX_FMT_RGBA64LE AV_PIX_FMT_RGBA64LE_LIBAV\n#define AV_PIX_FMT_BGRA64BE AV_PIX_FMT_BGRA64BE_LIBAV\n#define AV_PIX_FMT_BGRA64LE AV_PIX_FMT_BGRA64LE_LIBAV\n#define AV_PIX_FMT_GBRAP     AV_PIX_FMT_GBRAP_LIBAV\n#define AV_PIX_FMT_GBRAP16BE AV_PIX_FMT_GBRAP16BE_LIBAV\n#define AV_PIX_FMT_GBRAP16LE AV_PIX_FMT_GBRAP16LE_LIBAV\n#endif\n\n\n#define AV_PIX_FMT_Y400A AV_PIX_FMT_GRAY8A\n#define AV_PIX_FMT_GBR24P AV_PIX_FMT_GBRP\n\n#if AV_HAVE_BIGENDIAN\n#   define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be\n#else\n#   define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le\n#endif\n\n#define AV_PIX_FMT_RGB32   AV_PIX_FMT_NE(ARGB, BGRA)\n#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR)\n#define AV_PIX_FMT_BGR32   AV_PIX_FMT_NE(ABGR, RGBA)\n#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB)\n#define AV_PIX_FMT_0RGB32  AV_PIX_FMT_NE(0RGB, BGR0)\n#define AV_PIX_FMT_0BGR32  AV_PIX_FMT_NE(0BGR, RGB0)\n\n#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE)\n#define AV_PIX_FMT_YA16   AV_PIX_FMT_NE(YA16BE,   YA16LE)\n#define AV_PIX_FMT_RGB48  AV_PIX_FMT_NE(RGB48BE,  RGB48LE)\n#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE)\n#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE)\n#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE)\n#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE)\n#define AV_PIX_FMT_BGR48  AV_PIX_FMT_NE(BGR48BE,  BGR48LE)\n#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE)\n#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE)\n#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE)\n#define AV_PIX_FMT_BGRA64 AV_PIX_FMT_NE(BGRA64BE, BGRA64LE)\n\n#define AV_PIX_FMT_YUV420P9  
AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE)\n#define AV_PIX_FMT_YUV422P9  AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE)\n#define AV_PIX_FMT_YUV444P9  AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE)\n#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE)\n#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE)\n#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE)\n#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE)\n#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE)\n#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE)\n#define AV_PIX_FMT_YUV420P14 AV_PIX_FMT_NE(YUV420P14BE, YUV420P14LE)\n#define AV_PIX_FMT_YUV422P14 AV_PIX_FMT_NE(YUV422P14BE, YUV422P14LE)\n#define AV_PIX_FMT_YUV444P14 AV_PIX_FMT_NE(YUV444P14BE, YUV444P14LE)\n#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE)\n#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE)\n#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE)\n\n#define AV_PIX_FMT_GBRP9     AV_PIX_FMT_NE(GBRP9BE ,    GBRP9LE)\n#define AV_PIX_FMT_GBRP10    AV_PIX_FMT_NE(GBRP10BE,    GBRP10LE)\n#define AV_PIX_FMT_GBRP12    AV_PIX_FMT_NE(GBRP12BE,    GBRP12LE)\n#define AV_PIX_FMT_GBRP14    AV_PIX_FMT_NE(GBRP14BE,    GBRP14LE)\n#define AV_PIX_FMT_GBRP16    AV_PIX_FMT_NE(GBRP16BE,    GBRP16LE)\n#define AV_PIX_FMT_GBRAP16   AV_PIX_FMT_NE(GBRAP16BE,   GBRAP16LE)\n\n#define AV_PIX_FMT_BAYER_BGGR16 AV_PIX_FMT_NE(BAYER_BGGR16BE,    BAYER_BGGR16LE)\n#define AV_PIX_FMT_BAYER_RGGB16 AV_PIX_FMT_NE(BAYER_RGGB16BE,    BAYER_RGGB16LE)\n#define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE,    BAYER_GBRG16LE)\n#define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE,    BAYER_GRBG16LE)\n\n\n#define AV_PIX_FMT_YUVA420P9  AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE)\n#define AV_PIX_FMT_YUVA422P9  AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE)\n#define AV_PIX_FMT_YUVA444P9  AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE)\n#define 
AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE)\n#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE)\n#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE)\n#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE)\n#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE)\n#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE)\n\n#define AV_PIX_FMT_XYZ12      AV_PIX_FMT_NE(XYZ12BE, XYZ12LE)\n#define AV_PIX_FMT_NV20       AV_PIX_FMT_NE(NV20BE,  NV20LE)\n\n\n#if FF_API_PIX_FMT\n#define PixelFormat AVPixelFormat\n\n#define PIX_FMT_Y400A AV_PIX_FMT_Y400A\n#define PIX_FMT_GBR24P AV_PIX_FMT_GBR24P\n\n#define PIX_FMT_NE(be, le) AV_PIX_FMT_NE(be, le)\n\n#define PIX_FMT_RGB32   AV_PIX_FMT_RGB32\n#define PIX_FMT_RGB32_1 AV_PIX_FMT_RGB32_1\n#define PIX_FMT_BGR32   AV_PIX_FMT_BGR32\n#define PIX_FMT_BGR32_1 AV_PIX_FMT_BGR32_1\n#define PIX_FMT_0RGB32  AV_PIX_FMT_0RGB32\n#define PIX_FMT_0BGR32  AV_PIX_FMT_0BGR32\n\n#define PIX_FMT_GRAY16 AV_PIX_FMT_GRAY16\n#define PIX_FMT_RGB48  AV_PIX_FMT_RGB48\n#define PIX_FMT_RGB565 AV_PIX_FMT_RGB565\n#define PIX_FMT_RGB555 AV_PIX_FMT_RGB555\n#define PIX_FMT_RGB444 AV_PIX_FMT_RGB444\n#define PIX_FMT_BGR48  AV_PIX_FMT_BGR48\n#define PIX_FMT_BGR565 AV_PIX_FMT_BGR565\n#define PIX_FMT_BGR555 AV_PIX_FMT_BGR555\n#define PIX_FMT_BGR444 AV_PIX_FMT_BGR444\n\n#define PIX_FMT_YUV420P9  AV_PIX_FMT_YUV420P9\n#define PIX_FMT_YUV422P9  AV_PIX_FMT_YUV422P9\n#define PIX_FMT_YUV444P9  AV_PIX_FMT_YUV444P9\n#define PIX_FMT_YUV420P10 AV_PIX_FMT_YUV420P10\n#define PIX_FMT_YUV422P10 AV_PIX_FMT_YUV422P10\n#define PIX_FMT_YUV444P10 AV_PIX_FMT_YUV444P10\n#define PIX_FMT_YUV420P12 AV_PIX_FMT_YUV420P12\n#define PIX_FMT_YUV422P12 AV_PIX_FMT_YUV422P12\n#define PIX_FMT_YUV444P12 AV_PIX_FMT_YUV444P12\n#define PIX_FMT_YUV420P14 AV_PIX_FMT_YUV420P14\n#define PIX_FMT_YUV422P14 AV_PIX_FMT_YUV422P14\n#define PIX_FMT_YUV444P14 AV_PIX_FMT_YUV444P14\n#define PIX_FMT_YUV420P16 
AV_PIX_FMT_YUV420P16\n#define PIX_FMT_YUV422P16 AV_PIX_FMT_YUV422P16\n#define PIX_FMT_YUV444P16 AV_PIX_FMT_YUV444P16\n\n#define PIX_FMT_RGBA64 AV_PIX_FMT_RGBA64\n#define PIX_FMT_BGRA64 AV_PIX_FMT_BGRA64\n#define PIX_FMT_GBRP9  AV_PIX_FMT_GBRP9\n#define PIX_FMT_GBRP10 AV_PIX_FMT_GBRP10\n#define PIX_FMT_GBRP12 AV_PIX_FMT_GBRP12\n#define PIX_FMT_GBRP14 AV_PIX_FMT_GBRP14\n#define PIX_FMT_GBRP16 AV_PIX_FMT_GBRP16\n#endif\n\n/**\n  * Chromaticity coordinates of the source primaries.\n  */\nenum AVColorPrimaries {\n    AVCOL_PRI_RESERVED0   = 0,\n    AVCOL_PRI_BT709       = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B\n    AVCOL_PRI_UNSPECIFIED = 2,\n    AVCOL_PRI_RESERVED    = 3,\n    AVCOL_PRI_BT470M      = 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)\n\n    AVCOL_PRI_BT470BG     = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM\n    AVCOL_PRI_SMPTE170M   = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC\n    AVCOL_PRI_SMPTE240M   = 7, ///< functionally identical to above\n    AVCOL_PRI_FILM        = 8, ///< colour filters using Illuminant C\n    AVCOL_PRI_BT2020      = 9, ///< ITU-R BT2020\n    AVCOL_PRI_NB,              ///< Not part of ABI\n};\n\n/**\n * Color Transfer Characteristic.\n */\nenum AVColorTransferCharacteristic {\n    AVCOL_TRC_RESERVED0    = 0,\n    AVCOL_TRC_BT709        = 1,  ///< also ITU-R BT1361\n    AVCOL_TRC_UNSPECIFIED  = 2,\n    AVCOL_TRC_RESERVED     = 3,\n    AVCOL_TRC_GAMMA22      = 4,  ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM\n    AVCOL_TRC_GAMMA28      = 5,  ///< also ITU-R BT470BG\n    AVCOL_TRC_SMPTE170M    = 6,  ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC\n    AVCOL_TRC_SMPTE240M    = 7,\n    AVCOL_TRC_LINEAR       = 8,  ///< \"Linear transfer characteristics\"\n    AVCOL_TRC_LOG          = 9,  ///< \"Logarithmic transfer characteristic (100:1 range)\"\n    AVCOL_TRC_LOG_SQRT     = 10, ///< 
\"Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)\"\n    AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4\n    AVCOL_TRC_BT1361_ECG   = 12, ///< ITU-R BT1361 Extended Colour Gamut\n    AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC)\n    AVCOL_TRC_BT2020_10    = 14, ///< ITU-R BT2020 for 10 bit system\n    AVCOL_TRC_BT2020_12    = 15, ///< ITU-R BT2020 for 12 bit system\n    AVCOL_TRC_NB,                ///< Not part of ABI\n};\n\n/**\n * YUV colorspace type.\n */\nenum AVColorSpace {\n    AVCOL_SPC_RGB         = 0,  ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)\n    AVCOL_SPC_BT709       = 1,  ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B\n    AVCOL_SPC_UNSPECIFIED = 2,\n    AVCOL_SPC_RESERVED    = 3,\n    AVCOL_SPC_FCC         = 4,  ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20)\n    AVCOL_SPC_BT470BG     = 5,  ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601\n    AVCOL_SPC_SMPTE170M   = 6,  ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above\n    AVCOL_SPC_SMPTE240M   = 7,\n    AVCOL_SPC_YCOCG       = 8,  ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16\n    AVCOL_SPC_BT2020_NCL  = 9,  ///< ITU-R BT2020 non-constant luminance system\n    AVCOL_SPC_BT2020_CL   = 10, ///< ITU-R BT2020 constant luminance system\n    AVCOL_SPC_NB,               ///< Not part of ABI\n};\n#define AVCOL_SPC_YCGCO AVCOL_SPC_YCOCG\n\n\n/**\n * MPEG vs JPEG YUV range.\n */\nenum AVColorRange {\n    AVCOL_RANGE_UNSPECIFIED = 0,\n    AVCOL_RANGE_MPEG        = 1, ///< the normal 219*2^(n-8) \"MPEG\" YUV ranges\n    AVCOL_RANGE_JPEG        = 2, ///< the normal     2^n-1   \"JPEG\" YUV ranges\n    AVCOL_RANGE_NB,              ///< Not part of ABI\n};\n\n/**\n * Location of chroma samples.\n *\n *  X   X      3 4 X      X are luma samples,\n *             1 2        1-6 are possible chroma 
positions\n *  X   X      5 6 X      0 is undefined/unknown position\n */\nenum AVChromaLocation {\n    AVCHROMA_LOC_UNSPECIFIED = 0,\n    AVCHROMA_LOC_LEFT        = 1, ///< mpeg2/4, h264 default\n    AVCHROMA_LOC_CENTER      = 2, ///< mpeg1, jpeg, h263\n    AVCHROMA_LOC_TOPLEFT     = 3, ///< DV\n    AVCHROMA_LOC_TOP         = 4,\n    AVCHROMA_LOC_BOTTOMLEFT  = 5,\n    AVCHROMA_LOC_BOTTOM      = 6,\n    AVCHROMA_LOC_NB,              ///< Not part of ABI\n};\n\n#endif /* AVUTIL_PIXFMT_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/random_seed.h",
    "content": "/*\n * Copyright (c) 2009 Baptiste Coudurier <baptiste.coudurier@gmail.com>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_RANDOM_SEED_H\n#define AVUTIL_RANDOM_SEED_H\n\n#include <stdint.h>\n/**\n * @addtogroup lavu_crypto\n * @{\n */\n\n/**\n * Get a seed to use in conjunction with random functions.\n * This function tries to provide a good seed on a best-effort basis.\n * It is possible to call this function multiple times if more bits are needed.\n * It can be quite slow, which is why it should only be used as seed for a faster\n * PRNG. The quality of the seed depends on the platform.\n */\nuint32_t av_get_random_seed(void);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_RANDOM_SEED_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/rational.h",
    "content": "/*\n * rational numbers\n * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * rational numbers\n * @author Michael Niedermayer <michaelni@gmx.at>\n */\n\n#ifndef AVUTIL_RATIONAL_H\n#define AVUTIL_RATIONAL_H\n\n#include <stdint.h>\n#include <limits.h>\n#include \"attributes.h\"\n\n/**\n * @addtogroup lavu_math\n * @{\n */\n\n/**\n * rational number numerator/denominator\n */\ntypedef struct AVRational{\n    int num; ///< numerator\n    int den; ///< denominator\n} AVRational;\n\n/**\n * Create a rational.\n * Useful for compilers that do not support compound literals.\n * @note  The return value is not reduced.\n */\nstatic inline AVRational av_make_q(int num, int den)\n{\n    AVRational r = { num, den };\n    return r;\n}\n\n/**\n * Compare two rationals.\n * @param a first rational\n * @param b second rational\n * @return 0 if a==b, 1 if a>b, -1 if a<b, and INT_MIN if one of the\n * values is of the form 0/0\n */\nstatic inline int av_cmp_q(AVRational a, AVRational b){\n    const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den;\n\n    if(tmp) return (int)((tmp ^ a.den ^ b.den)>>63)|1;\n    else if(b.den && a.den) return 0;\n    else if(a.num 
&& b.num) return (a.num>>31) - (b.num>>31);\n    else                    return INT_MIN;\n}\n\n/**\n * Convert rational to double.\n * @param a rational to convert\n * @return (double) a\n */\nstatic inline double av_q2d(AVRational a){\n    return a.num / (double) a.den;\n}\n\n/**\n * Reduce a fraction.\n * This is useful for framerate calculations.\n * @param dst_num destination numerator\n * @param dst_den destination denominator\n * @param num source numerator\n * @param den source denominator\n * @param max the maximum allowed for dst_num & dst_den\n * @return 1 if exact, 0 otherwise\n */\nint av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max);\n\n/**\n * Multiply two rationals.\n * @param b first rational\n * @param c second rational\n * @return b*c\n */\nAVRational av_mul_q(AVRational b, AVRational c) av_const;\n\n/**\n * Divide one rational by another.\n * @param b first rational\n * @param c second rational\n * @return b/c\n */\nAVRational av_div_q(AVRational b, AVRational c) av_const;\n\n/**\n * Add two rationals.\n * @param b first rational\n * @param c second rational\n * @return b+c\n */\nAVRational av_add_q(AVRational b, AVRational c) av_const;\n\n/**\n * Subtract one rational from another.\n * @param b first rational\n * @param c second rational\n * @return b-c\n */\nAVRational av_sub_q(AVRational b, AVRational c) av_const;\n\n/**\n * Invert a rational.\n * @param q value\n * @return 1 / q\n */\nstatic av_always_inline AVRational av_inv_q(AVRational q)\n{\n    AVRational r = { q.den, q.num };\n    return r;\n}\n\n/**\n * Convert a double precision floating point number to a rational.\n * inf is expressed as {1,0} or {-1,0} depending on the sign.\n *\n * @param d double to convert\n * @param max the maximum allowed numerator and denominator\n * @return (AVRational) d\n */\nAVRational av_d2q(double d, int max) av_const;\n\n/**\n * @return 1 if q1 is nearer to q than q2, -1 if q2 is nearer\n * than q1, 0 if they have the same 
distance.\n */\nint av_nearer_q(AVRational q, AVRational q1, AVRational q2);\n\n/**\n * Find the nearest value in q_list to q.\n * @param q_list an array of rationals terminated by {0, 0}\n * @return the index of the nearest value found in the array\n */\nint av_find_nearest_q_idx(AVRational q, const AVRational* q_list);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_RATIONAL_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/replaygain.h",
    "content": "/*\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_REPLAYGAIN_H\n#define AVUTIL_REPLAYGAIN_H\n\n#include <stdint.h>\n\n/**\n * ReplayGain information (see\n * http://wiki.hydrogenaudio.org/index.php?title=ReplayGain_1.0_specification).\n * The size of this struct is a part of the public ABI.\n */\ntypedef struct AVReplayGain {\n    /**\n     * Track replay gain in microbels (divide by 100000 to get the value in dB).\n     * Should be set to INT32_MIN when unknown.\n     */\n    int32_t track_gain;\n    /**\n     * Peak track amplitude, with 100000 representing full scale (but values\n     * may overflow). 0 when unknown.\n     */\n    uint32_t track_peak;\n    /**\n     * Same as track_gain, but for the whole album.\n     */\n    int32_t album_gain;\n    /**\n     * Same as track_peak, but for the whole album.\n     */\n    uint32_t album_peak;\n} AVReplayGain;\n\n#endif /* AVUTIL_REPLAYGAIN_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/ripemd.h",
    "content": "/*\n * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at>\n * Copyright (C) 2013 James Almer <jamrial@gmail.com>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_RIPEMD_H\n#define AVUTIL_RIPEMD_H\n\n#include <stdint.h>\n\n#include \"attributes.h\"\n#include \"version.h\"\n\n/**\n * @defgroup lavu_ripemd RIPEMD\n * @ingroup lavu_crypto\n * @{\n */\n\nextern const int av_ripemd_size;\n\nstruct AVRIPEMD;\n\n/**\n * Allocate an AVRIPEMD context.\n */\nstruct AVRIPEMD *av_ripemd_alloc(void);\n\n/**\n * Initialize RIPEMD hashing.\n *\n * @param context pointer to the function context (of size av_ripemd_size)\n * @param bits    number of bits in digest (128, 160, 256 or 320 bits)\n * @return        zero if initialization succeeded, -1 otherwise\n */\nint av_ripemd_init(struct AVRIPEMD* context, int bits);\n\n/**\n * Update hash value.\n *\n * @param context hash function context\n * @param data    input data to update hash with\n * @param len     input data length\n */\nvoid av_ripemd_update(struct AVRIPEMD* context, const uint8_t* data, unsigned int len);\n\n/**\n * Finish hashing and output digest value.\n *\n * @param context hash function context\n * @param digest  buffer where output digest value is stored\n 
*/\nvoid av_ripemd_final(struct AVRIPEMD* context, uint8_t *digest);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_RIPEMD_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/samplefmt.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_SAMPLEFMT_H\n#define AVUTIL_SAMPLEFMT_H\n\n#include <stdint.h>\n\n#include \"avutil.h\"\n#include \"attributes.h\"\n\n/**\n * @addtogroup lavu_audio\n * @{\n *\n * @defgroup lavu_sampfmts Audio sample formats\n *\n * Audio sample format enumeration and related convenience functions.\n * @{\n *\n */\n\n/**\n * Audio sample formats\n *\n * - The data described by the sample format is always in native-endian order.\n *   Sample values can be expressed by native C types, hence the lack of a signed\n *   24-bit sample format even though it is a common raw audio data format.\n *\n * - The floating-point formats are based on full volume being in the range\n *   [-1.0, 1.0]. Any values outside this range are beyond full volume level.\n *\n * - The data layout as used in av_samples_fill_arrays() and elsewhere in FFmpeg\n *   (such as AVFrame in libavcodec) is as follows:\n *\n * @par\n * For planar sample formats, each audio channel is in a separate data plane,\n * and linesize is the buffer size, in bytes, for a single plane. All data\n * planes must be the same size. 
For packed sample formats, only the first data\n * plane is used, and samples for each channel are interleaved. In this case,\n * linesize is the buffer size, in bytes, for the 1 plane.\n *\n */\nenum AVSampleFormat {\n    AV_SAMPLE_FMT_NONE = -1,\n    AV_SAMPLE_FMT_U8,          ///< unsigned 8 bits\n    AV_SAMPLE_FMT_S16,         ///< signed 16 bits\n    AV_SAMPLE_FMT_S32,         ///< signed 32 bits\n    AV_SAMPLE_FMT_FLT,         ///< float\n    AV_SAMPLE_FMT_DBL,         ///< double\n\n    AV_SAMPLE_FMT_U8P,         ///< unsigned 8 bits, planar\n    AV_SAMPLE_FMT_S16P,        ///< signed 16 bits, planar\n    AV_SAMPLE_FMT_S32P,        ///< signed 32 bits, planar\n    AV_SAMPLE_FMT_FLTP,        ///< float, planar\n    AV_SAMPLE_FMT_DBLP,        ///< double, planar\n\n    AV_SAMPLE_FMT_NB           ///< Number of sample formats. DO NOT USE if linking dynamically\n};\n\n/**\n * Return the name of sample_fmt, or NULL if sample_fmt is not\n * recognized.\n */\nconst char *av_get_sample_fmt_name(enum AVSampleFormat sample_fmt);\n\n/**\n * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE\n * on error.\n */\nenum AVSampleFormat av_get_sample_fmt(const char *name);\n\n/**\n * Return the planar<->packed alternative form of the given sample format, or\n * AV_SAMPLE_FMT_NONE on error. 
If the passed sample_fmt is already in the\n * requested planar/packed format, the format returned is the same as the\n * input.\n */\nenum AVSampleFormat av_get_alt_sample_fmt(enum AVSampleFormat sample_fmt, int planar);\n\n/**\n * Get the packed alternative form of the given sample format.\n *\n * If the passed sample_fmt is already in packed format, the format returned is\n * the same as the input.\n *\n * @return  the packed alternative form of the given sample format or\n            AV_SAMPLE_FMT_NONE on error.\n */\nenum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt);\n\n/**\n * Get the planar alternative form of the given sample format.\n *\n * If the passed sample_fmt is already in planar format, the format returned is\n * the same as the input.\n *\n * @return  the planar alternative form of the given sample format or\n            AV_SAMPLE_FMT_NONE on error.\n */\nenum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt);\n\n/**\n * Generate a string corresponding to the sample format with\n * sample_fmt, or a header if sample_fmt is negative.\n *\n * @param buf the buffer where to write the string\n * @param buf_size the size of buf\n * @param sample_fmt the number of the sample format to print the\n * corresponding info string, or a negative value to print the\n * corresponding header.\n * @return the pointer to the filled buffer or NULL if sample_fmt is\n * unknown or in case of other errors\n */\nchar *av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt);\n\n/**\n * Return number of bytes per sample.\n *\n * @param sample_fmt the sample format\n * @return number of bytes per sample or zero if unknown for the given\n * sample format\n */\nint av_get_bytes_per_sample(enum AVSampleFormat sample_fmt);\n\n/**\n * Check if the sample format is planar.\n *\n * @param sample_fmt the sample format to inspect\n * @return 1 if the sample format is planar, 0 if it is interleaved\n */\nint 
av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt);\n\n/**\n * Get the required buffer size for the given audio parameters.\n *\n * @param[out] linesize calculated linesize, may be NULL\n * @param nb_channels   the number of channels\n * @param nb_samples    the number of samples in a single channel\n * @param sample_fmt    the sample format\n * @param align         buffer size alignment (0 = default, 1 = no alignment)\n * @return              required buffer size, or negative error code on failure\n */\nint av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,\n                               enum AVSampleFormat sample_fmt, int align);\n\n/**\n * @}\n *\n * @defgroup lavu_sampmanip Samples manipulation\n *\n * Functions that manipulate audio samples\n * @{\n */\n\n/**\n * Fill plane data pointers and linesize for samples with sample\n * format sample_fmt.\n *\n * The audio_data array is filled with the pointers to the samples data planes:\n * for planar, set the start point of each channel's data within the buffer,\n * for packed, set the start point of the entire buffer only.\n *\n * The value pointed to by linesize is set to the aligned size of each\n * channel's data buffer for planar layout, or to the aligned size of the\n * buffer for all channels for packed layout.\n *\n * The buffer in buf must be big enough to contain all the samples\n * (use av_samples_get_buffer_size() to compute its minimum size),\n * otherwise the audio_data pointers will point to invalid data.\n *\n * @see enum AVSampleFormat\n * The documentation for AVSampleFormat describes the data layout.\n *\n * @param[out] audio_data  array to be filled with the pointer for each channel\n * @param[out] linesize    calculated linesize, may be NULL\n * @param buf              the pointer to a buffer containing the samples\n * @param nb_channels      the number of channels\n * @param nb_samples       the number of samples in a single channel\n * @param sample_fmt       the 
sample format\n * @param align            buffer size alignment (0 = default, 1 = no alignment)\n * @return                 >=0 on success or a negative error code on failure\n * @todo return minimum size in bytes required for the buffer in case\n * of success at the next bump\n */\nint av_samples_fill_arrays(uint8_t **audio_data, int *linesize,\n                           const uint8_t *buf,\n                           int nb_channels, int nb_samples,\n                           enum AVSampleFormat sample_fmt, int align);\n\n/**\n * Allocate a samples buffer for nb_samples samples, and fill data pointers and\n * linesize accordingly.\n * The allocated samples buffer can be freed by using av_freep(&audio_data[0])\n * Allocated data will be initialized to silence.\n *\n * @see enum AVSampleFormat\n * The documentation for AVSampleFormat describes the data layout.\n *\n * @param[out] audio_data  array to be filled with the pointer for each channel\n * @param[out] linesize    aligned size for audio buffer(s), may be NULL\n * @param nb_channels      number of audio channels\n * @param nb_samples       number of samples per channel\n * @param align            buffer size alignment (0 = default, 1 = no alignment)\n * @return                 >=0 on success or a negative error code on failure\n * @todo return the size of the allocated buffer in case of success at the next bump\n * @see av_samples_fill_arrays()\n * @see av_samples_alloc_array_and_samples()\n */\nint av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels,\n                     int nb_samples, enum AVSampleFormat sample_fmt, int align);\n\n/**\n * Allocate a data pointers array, samples buffer for nb_samples\n * samples, and fill data pointers and linesize accordingly.\n *\n * This is the same as av_samples_alloc(), but also allocates the data\n * pointers array.\n *\n * @see av_samples_alloc()\n */\nint av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels,\n 
                                      int nb_samples, enum AVSampleFormat sample_fmt, int align);\n\n/**\n * Copy samples from src to dst.\n *\n * @param dst destination array of pointers to data planes\n * @param src source array of pointers to data planes\n * @param dst_offset offset in samples at which the data will be written to dst\n * @param src_offset offset in samples at which the data will be read from src\n * @param nb_samples number of samples to be copied\n * @param nb_channels number of audio channels\n * @param sample_fmt audio sample format\n */\nint av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset,\n                    int src_offset, int nb_samples, int nb_channels,\n                    enum AVSampleFormat sample_fmt);\n\n/**\n * Fill an audio buffer with silence.\n *\n * @param audio_data  array of pointers to data planes\n * @param offset      offset in samples at which to start filling\n * @param nb_samples  number of samples to fill\n * @param nb_channels number of audio channels\n * @param sample_fmt  audio sample format\n */\nint av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples,\n                           int nb_channels, enum AVSampleFormat sample_fmt);\n\n/**\n * @}\n * @}\n */\n#endif /* AVUTIL_SAMPLEFMT_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/sha.h",
    "content": "/*\n * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_SHA_H\n#define AVUTIL_SHA_H\n\n#include <stdint.h>\n\n#include \"attributes.h\"\n#include \"version.h\"\n\n/**\n * @defgroup lavu_sha SHA\n * @ingroup lavu_crypto\n * @{\n */\n\nextern const int av_sha_size;\n\nstruct AVSHA;\n\n/**\n * Allocate an AVSHA context.\n */\nstruct AVSHA *av_sha_alloc(void);\n\n/**\n * Initialize SHA-1 or SHA-2 hashing.\n *\n * @param context pointer to the function context (of size av_sha_size)\n * @param bits    number of bits in digest (SHA-1 - 160 bits, SHA-2 224 or 256 bits)\n * @return        zero if initialization succeeded, -1 otherwise\n */\nint av_sha_init(struct AVSHA* context, int bits);\n\n/**\n * Update hash value.\n *\n * @param context hash function context\n * @param data    input data to update hash with\n * @param len     input data length\n */\nvoid av_sha_update(struct AVSHA* context, const uint8_t* data, unsigned int len);\n\n/**\n * Finish hashing and output digest value.\n *\n * @param context hash function context\n * @param digest  buffer where output digest value is stored\n */\nvoid av_sha_final(struct AVSHA* context, uint8_t *digest);\n\n/**\n * 
@}\n */\n\n#endif /* AVUTIL_SHA_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/sha512.h",
    "content": "/*\n * Copyright (C) 2007 Michael Niedermayer <michaelni@gmx.at>\n * Copyright (C) 2013 James Almer <jamrial@gmail.com>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_SHA512_H\n#define AVUTIL_SHA512_H\n\n#include <stdint.h>\n\n#include \"attributes.h\"\n#include \"version.h\"\n\n/**\n * @defgroup lavu_sha512 SHA512\n * @ingroup lavu_crypto\n * @{\n */\n\nextern const int av_sha512_size;\n\nstruct AVSHA512;\n\n/**\n * Allocate an AVSHA512 context.\n */\nstruct AVSHA512 *av_sha512_alloc(void);\n\n/**\n * Initialize SHA-2 512 hashing.\n *\n * @param context pointer to the function context (of size av_sha512_size)\n * @param bits    number of bits in digest (224, 256, 384 or 512 bits)\n * @return        zero if initialization succeeded, -1 otherwise\n */\nint av_sha512_init(struct AVSHA512* context, int bits);\n\n/**\n * Update hash value.\n *\n * @param context hash function context\n * @param data    input data to update hash with\n * @param len     input data length\n */\nvoid av_sha512_update(struct AVSHA512* context, const uint8_t* data, unsigned int len);\n\n/**\n * Finish hashing and output digest value.\n *\n * @param context hash function context\n * @param digest  buffer where output digest value is 
stored\n */\nvoid av_sha512_final(struct AVSHA512* context, uint8_t *digest);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_SHA512_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/stereo3d.h",
    "content": "/*\n * Copyright (c) 2013 Vittorio Giovara <vittorio.giovara@gmail.com>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_STEREO3D_H\n#define AVUTIL_STEREO3D_H\n\n#include <stdint.h>\n\n#include \"frame.h\"\n\n/**\n * List of possible 3D Types\n */\nenum AVStereo3DType {\n    /**\n     * Video is not stereoscopic (and metadata has to be there).\n     */\n    AV_STEREO3D_2D,\n\n    /**\n     * Views are next to each other.\n     *\n     *    LLLLRRRR\n     *    LLLLRRRR\n     *    LLLLRRRR\n     *    ...\n     */\n    AV_STEREO3D_SIDEBYSIDE,\n\n    /**\n     * Views are on top of each other.\n     *\n     *    LLLLLLLL\n     *    LLLLLLLL\n     *    RRRRRRRR\n     *    RRRRRRRR\n     */\n    AV_STEREO3D_TOPBOTTOM,\n\n    /**\n     * Views are alternated temporally.\n     *\n     *     frame0   frame1   frame2   ...\n     *    LLLLLLLL RRRRRRRR LLLLLLLL\n     *    LLLLLLLL RRRRRRRR LLLLLLLL\n     *    LLLLLLLL RRRRRRRR LLLLLLLL\n     *    ...      ...      
...\n     */\n    AV_STEREO3D_FRAMESEQUENCE,\n\n    /**\n     * Views are packed in a checkerboard-like structure per pixel.\n     *\n     *    LRLRLRLR\n     *    RLRLRLRL\n     *    LRLRLRLR\n     *    ...\n     */\n    AV_STEREO3D_CHECKERBOARD,\n\n    /**\n     * Views are next to each other, but when upscaling\n     * apply a checkerboard pattern.\n     *\n     *     LLLLRRRR          L L L L    R R R R\n     *     LLLLRRRR    =>     L L L L  R R R R\n     *     LLLLRRRR          L L L L    R R R R\n     *     LLLLRRRR           L L L L  R R R R\n     */\n    AV_STEREO3D_SIDEBYSIDE_QUINCUNX,\n\n    /**\n     * Views are packed per line, as if interlaced.\n     *\n     *    LLLLLLLL\n     *    RRRRRRRR\n     *    LLLLLLLL\n     *    ...\n     */\n    AV_STEREO3D_LINES,\n\n    /**\n     * Views are packed per column.\n     *\n     *    LRLRLRLR\n     *    LRLRLRLR\n     *    LRLRLRLR\n     *    ...\n     */\n    AV_STEREO3D_COLUMNS,\n};\n\n\n/**\n * Inverted views, Right/Bottom represents the left view.\n */\n#define AV_STEREO3D_FLAG_INVERT     (1 << 0)\n\n/**\n * Stereo 3D type: this structure describes how two videos are packed\n * within a single video surface, with additional information as needed.\n *\n * @note The struct must be allocated with av_stereo3d_alloc() and\n *       its size is not a part of the public ABI.\n */\ntypedef struct AVStereo3D {\n    /**\n     * How views are packed within the video.\n     */\n    enum AVStereo3DType type;\n\n    /**\n     * Additional information about the frame packing.\n     */\n    int flags;\n} AVStereo3D;\n\n/**\n * Allocate an AVStereo3D structure and set its fields to default values.\n * The resulting struct can be freed using av_freep().\n *\n * @return An AVStereo3D filled with default values or NULL on failure.\n */\nAVStereo3D *av_stereo3d_alloc(void);\n\n/**\n * Allocate a complete AVFrameSideData and add it to the frame.\n *\n * @param frame The frame which side data is added to.\n *\n * @return The 
AVStereo3D structure to be filled by caller.\n */\nAVStereo3D *av_stereo3d_create_side_data(AVFrame *frame);\n\n#endif /* AVUTIL_STEREO3D_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/threadmessage.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public License\n * as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n * GNU Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public License\n * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,\n * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_THREADMESSAGE_H\n#define AVUTIL_THREADMESSAGE_H\n\ntypedef struct AVThreadMessageQueue AVThreadMessageQueue;\n\ntypedef enum AVThreadMessageFlags {\n\n    /**\n     * Perform non-blocking operation.\n     * If this flag is set, send and recv operations are non-blocking and\n     * return AVERROR(EAGAIN) immediately if they can not proceed.\n     */\n    AV_THREAD_MESSAGE_NONBLOCK = 1,\n\n} AVThreadMessageFlags;\n\n/**\n * Allocate a new message queue.\n *\n * @param mq      pointer to the message queue\n * @param nelem   maximum number of elements in the queue\n * @param elsize  size of each element in the queue\n * @return  >=0 for success; <0 for error, in particular AVERROR(ENOSYS) if\n *          lavu was built without thread support\n */\nint av_thread_message_queue_alloc(AVThreadMessageQueue **mq,\n                                  unsigned nelem,\n                                  unsigned elsize);\n\n/**\n * Free a message queue.\n *\n * The message queue must no longer be in use by another thread.\n */\nvoid av_thread_message_queue_free(AVThreadMessageQueue **mq);\n\n/**\n * Send a message on the queue.\n */\nint 
av_thread_message_queue_send(AVThreadMessageQueue *mq,\n                                 void *msg,\n                                 unsigned flags);\n\n/**\n * Receive a message from the queue.\n */\nint av_thread_message_queue_recv(AVThreadMessageQueue *mq,\n                                 void *msg,\n                                 unsigned flags);\n\n/**\n * Set the sending error code.\n *\n * If the error code is set to non-zero, av_thread_message_queue_recv() will\n * return it immediately when there are no longer available messages.\n * Conventional values, such as AVERROR_EOF or AVERROR(EAGAIN), can be used\n * to cause the receiving thread to stop or suspend its operation.\n */\nvoid av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq,\n                                          int err);\n\n/**\n * Set the receiving error code.\n *\n * If the error code is set to non-zero, av_thread_message_queue_send() will\n * return it immediately. Conventional values, such as AVERROR_EOF or\n * AVERROR(EAGAIN), can be used to cause the sending thread to stop or\n * suspend its operation.\n */\nvoid av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq,\n                                          int err);\n\n#endif /* AVUTIL_THREADMESSAGE_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/time.h",
    "content": "/*\n * Copyright (c) 2000-2003 Fabrice Bellard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_TIME_H\n#define AVUTIL_TIME_H\n\n#include <stdint.h>\n\n/**\n * Get the current time in microseconds.\n */\nint64_t av_gettime(void);\n\n/**\n * Get the current time in microseconds since some unspecified starting point.\n * On platforms that support it, the time comes from a monotonic clock\n * This property makes this time source ideal for measuring relative time.\n * The returned values may not be monotonic on platforms where a monotonic\n * clock is not available.\n */\nint64_t av_gettime_relative(void);\n\n/**\n * Indicates with a boolean result if the av_gettime_relative() time source\n * is monotonic.\n */\nint av_gettime_relative_is_monotonic(void);\n\n/**\n * Sleep for a period of time.  Although the duration is expressed in\n * microseconds, the actual delay may be rounded to the precision of the\n * system timer.\n *\n * @param  usec Number of microseconds to sleep.\n * @return zero on success or (negative) error code.\n */\nint av_usleep(unsigned usec);\n\n#endif /* AVUTIL_TIME_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/timecode.h",
    "content": "/*\n * Copyright (c) 2006 Smartjog S.A.S, Baptiste Coudurier <baptiste.coudurier@gmail.com>\n * Copyright (c) 2011-2012 Smartjog S.A.S, Clément Bœsch <clement.boesch@smartjog.com>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * Timecode helpers header\n */\n\n#ifndef AVUTIL_TIMECODE_H\n#define AVUTIL_TIMECODE_H\n\n#include <stdint.h>\n#include \"rational.h\"\n\n#define AV_TIMECODE_STR_SIZE 16\n\nenum AVTimecodeFlag {\n    AV_TIMECODE_FLAG_DROPFRAME      = 1<<0, ///< timecode is drop frame\n    AV_TIMECODE_FLAG_24HOURSMAX     = 1<<1, ///< timecode wraps after 24 hours\n    AV_TIMECODE_FLAG_ALLOWNEGATIVE  = 1<<2, ///< negative time values are allowed\n};\n\ntypedef struct {\n    int start;          ///< timecode frame start (first base frame number)\n    uint32_t flags;     ///< flags such as drop frame, +24 hours support, ...\n    AVRational rate;    ///< frame rate in rational form\n    unsigned fps;       ///< frame per second; must be consistent with the rate field\n} AVTimecode;\n\n/**\n * Adjust frame number for NTSC drop frame time code.\n *\n * @param framenum frame number to adjust\n * @param fps      frame per second, 30 or 60\n * @return         adjusted frame number\n * @warning        adjustment is 
only valid in NTSC 29.97 and 59.94\n */\nint av_timecode_adjust_ntsc_framenum2(int framenum, int fps);\n\n/**\n * Convert frame number to SMPTE 12M binary representation.\n *\n * @param tc       timecode data correctly initialized\n * @param framenum frame number\n * @return         the SMPTE binary representation\n *\n * @note Frame number adjustment is automatically done in case of drop timecode,\n *       you do NOT have to call av_timecode_adjust_ntsc_framenum2().\n * @note The frame number is relative to tc->start.\n * @note Color frame (CF), binary group flags (BGF) and biphase mark polarity\n *       correction (PC) bits are set to zero.\n */\nuint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum);\n\n/**\n * Load timecode string in buf.\n *\n * @param buf      destination buffer, must be at least AV_TIMECODE_STR_SIZE long\n * @param tc       timecode data correctly initialized\n * @param framenum frame number\n * @return         the buf parameter\n *\n * @note Timecode representation can be a negative timecode and have more than\n *       24 hours, but will only be honored if the flags are correctly set.\n * @note The frame number is relative to tc->start.\n */\nchar *av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum);\n\n/**\n * Get the timecode string from the SMPTE timecode format.\n *\n * @param buf        destination buffer, must be at least AV_TIMECODE_STR_SIZE long\n * @param tcsmpte    the 32-bit SMPTE timecode\n * @param prevent_df prevent the use of a drop flag when it is known the DF bit\n *                   is arbitrary\n * @return           the buf parameter\n */\nchar *av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df);\n\n/**\n * Get the timecode string from the 25-bit timecode format (MPEG GOP format).\n *\n * @param buf     destination buffer, must be at least AV_TIMECODE_STR_SIZE long\n * @param tc25bit the 25-bits timecode\n * @return        the buf parameter\n 
*/\nchar *av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit);\n\n/**\n * Init a timecode struct with the passed parameters.\n *\n * @param log_ctx     a pointer to an arbitrary struct of which the first field\n *                    is a pointer to an AVClass struct (used for av_log)\n * @param tc          pointer to an allocated AVTimecode\n * @param rate        frame rate in rational form\n * @param flags       miscellaneous flags such as drop frame, +24 hours, ...\n *                    (see AVTimecodeFlag)\n * @param frame_start the first frame number\n * @return            0 on success, AVERROR otherwise\n */\nint av_timecode_init(AVTimecode *tc, AVRational rate, int flags, int frame_start, void *log_ctx);\n\n/**\n * Parse timecode representation (hh:mm:ss[:;.]ff).\n *\n * @param log_ctx a pointer to an arbitrary struct of which the first field is a\n *                pointer to an AVClass struct (used for av_log).\n * @param tc      pointer to an allocated AVTimecode\n * @param rate    frame rate in rational form\n * @param str     timecode string which will determine the frame start\n * @return        0 on success, AVERROR otherwise\n */\nint av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *str, void *log_ctx);\n\n/**\n * Check if the timecode feature is available for the given frame rate\n *\n * @return 0 if supported, <0 otherwise\n */\nint av_timecode_check_frame_rate(AVRational rate);\n\n#endif /* AVUTIL_TIMECODE_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/timestamp.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n/**\n * @file\n * timestamp utils, mostly useful for debugging/logging purposes\n */\n\n#ifndef AVUTIL_TIMESTAMP_H\n#define AVUTIL_TIMESTAMP_H\n\n#include \"common.h\"\n\n#if defined(__cplusplus) && !defined(__STDC_FORMAT_MACROS) && !defined(PRId64)\n#error missing -D__STDC_FORMAT_MACROS / #define __STDC_FORMAT_MACROS\n#endif\n\n#define AV_TS_MAX_STRING_SIZE 32\n\n/**\n * Fill the provided buffer with a string containing a timestamp\n * representation.\n *\n * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE\n * @param ts the timestamp to represent\n * @return the buffer in input\n */\nstatic inline char *av_ts_make_string(char *buf, int64_t ts)\n{\n    if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, \"NOPTS\");\n    else                      snprintf(buf, AV_TS_MAX_STRING_SIZE, \"%\"PRId64, ts);\n    return buf;\n}\n\n/**\n * Convenience macro, the return value should be used only directly in\n * function arguments but never stand-alone.\n */\n#define av_ts2str(ts) av_ts_make_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts)\n\n/**\n * Fill the provided buffer with a string containing a timestamp time\n * representation.\n *\n * @param buf a buffer 
with size in bytes of at least AV_TS_MAX_STRING_SIZE\n * @param ts the timestamp to represent\n * @param tb the timebase of the timestamp\n * @return the buffer in input\n */\nstatic inline char *av_ts_make_time_string(char *buf, int64_t ts, AVRational *tb)\n{\n    if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, \"NOPTS\");\n    else                      snprintf(buf, AV_TS_MAX_STRING_SIZE, \"%.6g\", av_q2d(*tb) * ts);\n    return buf;\n}\n\n/**\n * Convenience macro, the return value should be used only directly in\n * function arguments but never stand-alone.\n */\n#define av_ts2timestr(ts, tb) av_ts_make_time_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts, tb)\n\n#endif /* AVUTIL_TIMESTAMP_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/twofish.h",
    "content": "/*\n * An implementation of the TwoFish algorithm\n * Copyright (c) 2015 Supraja Meedinti\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_TWOFISH_H\n#define AVUTIL_TWOFISH_H\n\n#include <stdint.h>\n\n\n/**\n  * @file\n  * @brief Public header for libavutil TWOFISH algorithm\n  * @defgroup lavu_twofish TWOFISH\n  * @ingroup lavu_crypto\n  * @{\n  */\n\nextern const int av_twofish_size;\n\nstruct AVTWOFISH;\n\n/**\n  * Allocate an AVTWOFISH context\n  * To free the struct: av_free(ptr)\n  */\nstruct AVTWOFISH *av_twofish_alloc(void);\n\n/**\n  * Initialize an AVTWOFISH context.\n  *\n  * @param ctx an AVTWOFISH context\n  * @param key a key of size ranging from 1 to 32 bytes used for encryption/decryption\n  * @param key_bits number of keybits: 128, 192, 256 If less than the required, padded with zeroes to nearest valid value; return value is 0 if key_bits is 128/192/256, -1 if less than 0, 1 otherwise\n */\nint av_twofish_init(struct AVTWOFISH *ctx, const uint8_t *key, int key_bits);\n\n/**\n  * Encrypt or decrypt a buffer using a previously initialized context\n  *\n  * @param ctx an AVTWOFISH context\n  * @param dst destination array, can be equal to src\n  * @param src source array, can be equal to dst\n  * 
@param count number of 16 byte blocks\n  * @param iv initialization vector for CBC mode, NULL for ECB mode\n  * @param decrypt 0 for encryption, 1 for decryption\n */\nvoid av_twofish_crypt(struct AVTWOFISH *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t* iv, int decrypt);\n\n/**\n * @}\n */\n#endif /* AVUTIL_TWOFISH_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/version.h",
    "content": "/*\n * copyright (c) 2003 Fabrice Bellard\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_VERSION_H\n#define AVUTIL_VERSION_H\n\n#include \"macros.h\"\n\n/**\n * @addtogroup version_utils\n *\n * Useful to check and match library version in order to maintain\n * backward compatibility.\n *\n * @{\n */\n\n#define AV_VERSION_INT(a, b, c) ((a)<<16 | (b)<<8 | (c))\n#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c\n#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c)\n\n/**\n * @}\n */\n\n/**\n * @file\n * @ingroup lavu\n * Libavutil version macros\n */\n\n/**\n * @defgroup lavu_ver Version and Build diagnostics\n *\n * Macros and function useful to check at compiletime and at runtime\n * which version of libavutil is in use.\n *\n * @{\n */\n\n#define LIBAVUTIL_VERSION_MAJOR  54\n#define LIBAVUTIL_VERSION_MINOR  20\n#define LIBAVUTIL_VERSION_MICRO 100\n\n#define LIBAVUTIL_VERSION_INT   AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \\\n                                               LIBAVUTIL_VERSION_MINOR, \\\n                                               LIBAVUTIL_VERSION_MICRO)\n#define LIBAVUTIL_VERSION       AV_VERSION(LIBAVUTIL_VERSION_MAJOR,     \\\n                                           LIBAVUTIL_VERSION_MINOR,   
  \\\n                                           LIBAVUTIL_VERSION_MICRO)\n#define LIBAVUTIL_BUILD         LIBAVUTIL_VERSION_INT\n\n#define LIBAVUTIL_IDENT         \"Lavu\" AV_STRINGIFY(LIBAVUTIL_VERSION)\n\n/**\n * @}\n *\n * @defgroup depr_guards Deprecation guards\n * FF_API_* defines may be placed below to indicate public API that will be\n * dropped at a future version bump. The defines themselves are not part of\n * the public API and may change, break or disappear at any time.\n *\n * @{\n */\n\n#ifndef FF_API_OLD_AVOPTIONS\n#define FF_API_OLD_AVOPTIONS            (LIBAVUTIL_VERSION_MAJOR < 55)\n#endif\n#ifndef FF_API_PIX_FMT\n#define FF_API_PIX_FMT                  (LIBAVUTIL_VERSION_MAJOR < 55)\n#endif\n#ifndef FF_API_CONTEXT_SIZE\n#define FF_API_CONTEXT_SIZE             (LIBAVUTIL_VERSION_MAJOR < 55)\n#endif\n#ifndef FF_API_PIX_FMT_DESC\n#define FF_API_PIX_FMT_DESC             (LIBAVUTIL_VERSION_MAJOR < 55)\n#endif\n#ifndef FF_API_AV_REVERSE\n#define FF_API_AV_REVERSE               (LIBAVUTIL_VERSION_MAJOR < 55)\n#endif\n#ifndef FF_API_AUDIOCONVERT\n#define FF_API_AUDIOCONVERT             (LIBAVUTIL_VERSION_MAJOR < 55)\n#endif\n#ifndef FF_API_CPU_FLAG_MMX2\n#define FF_API_CPU_FLAG_MMX2            (LIBAVUTIL_VERSION_MAJOR < 55)\n#endif\n#ifndef FF_API_LLS_PRIVATE\n#define FF_API_LLS_PRIVATE              (LIBAVUTIL_VERSION_MAJOR < 55)\n#endif\n#ifndef FF_API_AVFRAME_LAVC\n#define FF_API_AVFRAME_LAVC             (LIBAVUTIL_VERSION_MAJOR < 55)\n#endif\n#ifndef FF_API_VDPAU\n#define FF_API_VDPAU                    (LIBAVUTIL_VERSION_MAJOR < 55)\n#endif\n#ifndef FF_API_GET_CHANNEL_LAYOUT_COMPAT\n#define FF_API_GET_CHANNEL_LAYOUT_COMPAT (LIBAVUTIL_VERSION_MAJOR < 55)\n#endif\n#ifndef FF_API_XVMC\n#define FF_API_XVMC                     (LIBAVUTIL_VERSION_MAJOR < 55)\n#endif\n#ifndef FF_API_OPT_TYPE_METADATA\n#define FF_API_OPT_TYPE_METADATA        (LIBAVUTIL_VERSION_MAJOR < 55)\n#endif\n\n#ifndef FF_CONST_AVUTIL55\n#if LIBAVUTIL_VERSION_MAJOR >= 55\n#define 
FF_CONST_AVUTIL55 const\n#else\n#define FF_CONST_AVUTIL55\n#endif\n#endif\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_VERSION_H */\n\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libavutil/xtea.h",
    "content": "/*\n * A 32-bit implementation of the XTEA algorithm\n * Copyright (c) 2012 Samuel Pitoiset\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef AVUTIL_XTEA_H\n#define AVUTIL_XTEA_H\n\n#include <stdint.h>\n\n/**\n * @file\n * @brief Public header for libavutil XTEA algorithm\n * @defgroup lavu_xtea XTEA\n * @ingroup lavu_crypto\n * @{\n */\n\ntypedef struct AVXTEA {\n    uint32_t key[16];\n} AVXTEA;\n\n/**\n * Initialize an AVXTEA context.\n *\n * @param ctx an AVXTEA context\n * @param key a key of 16 bytes used for encryption/decryption\n */\nvoid av_xtea_init(struct AVXTEA *ctx, const uint8_t key[16]);\n\n/**\n * Encrypt or decrypt a buffer using a previously initialized context.\n *\n * @param ctx an AVXTEA context\n * @param dst destination array, can be equal to src\n * @param src source array, can be equal to dst\n * @param count number of 8 byte blocks\n * @param iv initialization vector for CBC mode, if NULL then ECB will be used\n * @param decrypt 0 for encryption, 1 for decryption\n */\nvoid av_xtea_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src,\n                   int count, uint8_t *iv, int decrypt);\n\n/**\n * @}\n */\n\n#endif /* AVUTIL_XTEA_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libswresample/swresample.h",
    "content": "/*\n * Copyright (C) 2011-2013 Michael Niedermayer (michaelni@gmx.at)\n *\n * This file is part of libswresample\n *\n * libswresample is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * libswresample is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with libswresample; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef SWRESAMPLE_SWRESAMPLE_H\n#define SWRESAMPLE_SWRESAMPLE_H\n\n/**\n * @file\n * @ingroup lswr\n * libswresample public header\n */\n\n/**\n * @defgroup lswr Libswresample\n * @{\n *\n * Libswresample (lswr) is a library that handles audio resampling, sample\n * format conversion and mixing.\n *\n * Interaction with lswr is done through SwrContext, which is\n * allocated with swr_alloc() or swr_alloc_set_opts(). It is opaque, so all parameters\n * must be set with the @ref avoptions API.\n *\n * The first thing you will need to do in order to use lswr is to allocate\n * SwrContext. This can be done with swr_alloc() or swr_alloc_set_opts(). 
If you\n * are using the former, you must set options through the @ref avoptions API.\n * The latter function provides the same feature, but it allows you to set some\n * common options in the same statement.\n *\n * For example the following code will setup conversion from planar float sample\n * format to interleaved signed 16-bit integer, downsampling from 48kHz to\n * 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing\n * matrix). This is using the swr_alloc() function.\n * @code\n * SwrContext *swr = swr_alloc();\n * av_opt_set_channel_layout(swr, \"in_channel_layout\",  AV_CH_LAYOUT_5POINT1, 0);\n * av_opt_set_channel_layout(swr, \"out_channel_layout\", AV_CH_LAYOUT_STEREO,  0);\n * av_opt_set_int(swr, \"in_sample_rate\",     48000,                0);\n * av_opt_set_int(swr, \"out_sample_rate\",    44100,                0);\n * av_opt_set_sample_fmt(swr, \"in_sample_fmt\",  AV_SAMPLE_FMT_FLTP, 0);\n * av_opt_set_sample_fmt(swr, \"out_sample_fmt\", AV_SAMPLE_FMT_S16,  0);\n * @endcode\n *\n * The same job can be done using swr_alloc_set_opts() as well:\n * @code\n * SwrContext *swr = swr_alloc_set_opts(NULL,  // we're allocating a new context\n *                       AV_CH_LAYOUT_STEREO,  // out_ch_layout\n *                       AV_SAMPLE_FMT_S16,    // out_sample_fmt\n *                       44100,                // out_sample_rate\n *                       AV_CH_LAYOUT_5POINT1, // in_ch_layout\n *                       AV_SAMPLE_FMT_FLTP,   // in_sample_fmt\n *                       48000,                // in_sample_rate\n *                       0,                    // log_offset\n *                       NULL);                // log_ctx\n * @endcode\n *\n * Once all values have been set, it must be initialized with swr_init(). 
If\n * you need to change the conversion parameters, you can change the parameters\n * using @ref AVOptions, as described above in the first example; or by using\n * swr_alloc_set_opts(), but with the first argument the allocated context.\n * You must then call swr_init() again.\n *\n * The conversion itself is done by repeatedly calling swr_convert().\n * Note that the samples may get buffered in swr if you provide insufficient\n * output space or if sample rate conversion is done, which requires \"future\"\n * samples. Samples that do not require future input can be retrieved at any\n * time by using swr_convert() (in_count can be set to 0).\n * At the end of conversion the resampling buffer can be flushed by calling\n * swr_convert() with NULL in and 0 in_count.\n *\n * The samples used in the conversion process can be managed with the libavutil\n * @ref lavu_sampmanip \"samples manipulation\" API, including av_samples_alloc()\n * function used in the following example.\n *\n * The delay between input and output, can at any time be found by using\n * swr_get_delay().\n *\n * The following code demonstrates the conversion loop assuming the parameters\n * from above and caller-defined functions get_input() and handle_output():\n * @code\n * uint8_t **input;\n * int in_samples;\n *\n * while (get_input(&input, &in_samples)) {\n *     uint8_t *output;\n *     int out_samples = av_rescale_rnd(swr_get_delay(swr, 48000) +\n *                                      in_samples, 44100, 48000, AV_ROUND_UP);\n *     av_samples_alloc(&output, NULL, 2, out_samples,\n *                      AV_SAMPLE_FMT_S16, 0);\n *     out_samples = swr_convert(swr, &output, out_samples,\n *                                      input, in_samples);\n *     handle_output(output, out_samples);\n *     av_freep(&output);\n * }\n * @endcode\n *\n * When the conversion is finished, the conversion\n * context and everything associated with it must be freed with swr_free().\n * A swr_close() function 
is also available, but it exists mainly for\n * compatibility with libavresample, and is not required to be called.\n *\n * There will be no memory leak if the data is not completely flushed before\n * swr_free().\n */\n\n#include <stdint.h>\n#include \"libavutil/frame.h\"\n#include \"libavutil/samplefmt.h\"\n\n#include \"libswresample/version.h\"\n\n#if LIBSWRESAMPLE_VERSION_MAJOR < 1\n#define SWR_CH_MAX 32   ///< Maximum number of channels\n#endif\n\n/**\n * @name Option constants\n * These constants are used for the @ref avoptions interface for lswr.\n * @{\n *\n */\n\n#define SWR_FLAG_RESAMPLE 1 ///< Force resampling even if equal sample rate\n//TODO use int resample ?\n//long term TODO can we enable this dynamically?\n\n/** Dithering algorithms */\nenum SwrDitherType {\n    SWR_DITHER_NONE = 0,\n    SWR_DITHER_RECTANGULAR,\n    SWR_DITHER_TRIANGULAR,\n    SWR_DITHER_TRIANGULAR_HIGHPASS,\n\n    SWR_DITHER_NS = 64,         ///< not part of API/ABI\n    SWR_DITHER_NS_LIPSHITZ,\n    SWR_DITHER_NS_F_WEIGHTED,\n    SWR_DITHER_NS_MODIFIED_E_WEIGHTED,\n    SWR_DITHER_NS_IMPROVED_E_WEIGHTED,\n    SWR_DITHER_NS_SHIBATA,\n    SWR_DITHER_NS_LOW_SHIBATA,\n    SWR_DITHER_NS_HIGH_SHIBATA,\n    SWR_DITHER_NB,              ///< not part of API/ABI\n};\n\n/** Resampling Engines */\nenum SwrEngine {\n    SWR_ENGINE_SWR,             /**< SW Resampler */\n    SWR_ENGINE_SOXR,            /**< SoX Resampler */\n    SWR_ENGINE_NB,              ///< not part of API/ABI\n};\n\n/** Resampling Filter Types */\nenum SwrFilterType {\n    SWR_FILTER_TYPE_CUBIC,              /**< Cubic */\n    SWR_FILTER_TYPE_BLACKMAN_NUTTALL,   /**< Blackman Nuttall Windowed Sinc */\n    SWR_FILTER_TYPE_KAISER,             /**< Kaiser Windowed Sinc */\n};\n\n/**\n * @}\n */\n\n/**\n * The libswresample context. Unlike libavcodec and libavformat, this structure\n * is opaque. 
This means that if you would like to set options, you must use\n * the @ref avoptions API and cannot directly set values to members of the\n * structure.\n */\ntypedef struct SwrContext SwrContext;\n\n/**\n * Get the AVClass for SwrContext. It can be used in combination with\n * AV_OPT_SEARCH_FAKE_OBJ for examining options.\n *\n * @see av_opt_find().\n * @return the AVClass of SwrContext\n */\nconst AVClass *swr_get_class(void);\n\n/**\n * @name SwrContext constructor functions\n * @{\n */\n\n/**\n * Allocate SwrContext.\n *\n * If you use this function you will need to set the parameters (manually or\n * with swr_alloc_set_opts()) before calling swr_init().\n *\n * @see swr_alloc_set_opts(), swr_init(), swr_free()\n * @return NULL on error, allocated context otherwise\n */\nstruct SwrContext *swr_alloc(void);\n\n/**\n * Initialize context after user parameters have been set.\n * @note The context must be configured using the AVOption API.\n *\n * @see av_opt_set_int()\n * @see av_opt_set_dict()\n *\n * @param[in,out]   s Swr context to initialize\n * @return AVERROR error code in case of failure.\n */\nint swr_init(struct SwrContext *s);\n\n/**\n * Check whether an swr context has been initialized or not.\n *\n * @param[in]       s Swr context to check\n * @see swr_init()\n * @return positive if it has been initialized, 0 if not initialized\n */\nint swr_is_initialized(struct SwrContext *s);\n\n/**\n * Allocate SwrContext if needed and set/reset common parameters.\n *\n * This function does not require s to be allocated with swr_alloc(). 
On the\n * other hand, swr_alloc() can use swr_alloc_set_opts() to set the parameters\n * on the allocated context.\n *\n * @param s               existing Swr context if available, or NULL if not\n * @param out_ch_layout   output channel layout (AV_CH_LAYOUT_*)\n * @param out_sample_fmt  output sample format (AV_SAMPLE_FMT_*).\n * @param out_sample_rate output sample rate (frequency in Hz)\n * @param in_ch_layout    input channel layout (AV_CH_LAYOUT_*)\n * @param in_sample_fmt   input sample format (AV_SAMPLE_FMT_*).\n * @param in_sample_rate  input sample rate (frequency in Hz)\n * @param log_offset      logging level offset\n * @param log_ctx         parent logging context, can be NULL\n *\n * @see swr_init(), swr_free()\n * @return NULL on error, allocated context otherwise\n */\nstruct SwrContext *swr_alloc_set_opts(struct SwrContext *s,\n                                      int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate,\n                                      int64_t  in_ch_layout, enum AVSampleFormat  in_sample_fmt, int  in_sample_rate,\n                                      int log_offset, void *log_ctx);\n\n/**\n * @}\n *\n * @name SwrContext destructor functions\n * @{\n */\n\n/**\n * Free the given SwrContext and set the pointer to NULL.\n *\n * @param[in] s a pointer to a pointer to Swr context\n */\nvoid swr_free(struct SwrContext **s);\n\n/**\n * Closes the context so that swr_is_initialized() returns 0.\n *\n * The context can be brought back to life by running swr_init(),\n * swr_init() can also be used without swr_close().\n * This function is mainly provided for simplifying the usecase\n * where one tries to support libavresample and libswresample.\n *\n * @param[in,out] s Swr context to be closed\n */\nvoid swr_close(struct SwrContext *s);\n\n/**\n * @}\n *\n * @name Core conversion functions\n * @{\n */\n\n/** Convert audio.\n *\n * in and in_count can be set to 0 to flush the last few samples out at the\n * 
end.\n *\n * If more input is provided than output space then the input will be buffered.\n * You can avoid this buffering by providing more output space than input.\n * Conversion will run directly without copying whenever possible.\n *\n * @param s         allocated Swr context, with parameters set\n * @param out       output buffers, only the first one need be set in case of packed audio\n * @param out_count amount of space available for output in samples per channel\n * @param in        input buffers, only the first one need to be set in case of packed audio\n * @param in_count  number of input samples available in one channel\n *\n * @return number of samples output per channel, negative value on error\n */\nint swr_convert(struct SwrContext *s, uint8_t **out, int out_count,\n                                const uint8_t **in , int in_count);\n\n/**\n * Convert the next timestamp from input to output\n * timestamps are in 1/(in_sample_rate * out_sample_rate) units.\n *\n * @note There are 2 slightly differently behaving modes.\n *       @li When automatic timestamp compensation is not used, (min_compensation >= FLT_MAX)\n *              in this case timestamps will be passed through with delays compensated\n *       @li When automatic timestamp compensation is used, (min_compensation < FLT_MAX)\n *              in this case the output timestamps will match output sample numbers.\n *              See ffmpeg-resampler(1) for the two modes of compensation.\n *\n * @param s[in]     initialized Swr context\n * @param pts[in]   timestamp for the next input sample, INT64_MIN if unknown\n * @see swr_set_compensation(), swr_drop_output(), and swr_inject_silence() are\n *      function used internally for timestamp compensation.\n * @return the output timestamp for the next output sample\n */\nint64_t swr_next_pts(struct SwrContext *s, int64_t pts);\n\n/**\n * @}\n *\n * @name Low-level option setting functions\n * These functions provide a means to set low-level options 
that is not possible\n * with the AVOption API.\n * @{\n */\n\n/**\n * Activate resampling compensation (\"soft\" compensation). This function is\n * internally called when needed in swr_next_pts().\n *\n * @param[in,out] s             allocated Swr context. If it is not initialized,\n *                              or SWR_FLAG_RESAMPLE is not set, swr_init() is\n *                              called with the flag set.\n * @param[in]     sample_delta  delta in PTS per sample\n * @param[in]     compensation_distance number of samples to compensate for\n * @return    >= 0 on success, AVERROR error codes if:\n *            @li @c s is NULL,\n *            @li @c compensation_distance is less than 0,\n *            @li @c compensation_distance is 0 but sample_delta is not,\n *            @li compensation unsupported by resampler, or\n *            @li swr_init() fails when called.\n */\nint swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance);\n\n/**\n * Set a customized input channel mapping.\n *\n * @param[in,out] s           allocated Swr context, not yet initialized\n * @param[in]     channel_map customized input channel mapping (array of channel\n *                            indexes, -1 for a muted channel)\n * @return >= 0 on success, or AVERROR error code in case of failure.\n */\nint swr_set_channel_mapping(struct SwrContext *s, const int *channel_map);\n\n/**\n * Set a customized remix matrix.\n *\n * @param s       allocated Swr context, not yet initialized\n * @param matrix  remix coefficients; matrix[i + stride * o] is\n *                the weight of input channel i in output channel o\n * @param stride  offset between lines of the matrix\n * @return  >= 0 on success, or AVERROR error code in case of failure.\n */\nint swr_set_matrix(struct SwrContext *s, const double *matrix, int stride);\n\n/**\n * @}\n *\n * @name Sample handling functions\n * @{\n */\n\n/**\n * Drops the specified number of output samples.\n *\n * 
This function, along with swr_inject_silence(), is called by swr_next_pts()\n * if needed for \"hard\" compensation.\n *\n * @param s     allocated Swr context\n * @param count number of samples to be dropped\n *\n * @return >= 0 on success, or a negative AVERROR code on failure\n */\nint swr_drop_output(struct SwrContext *s, int count);\n\n/**\n * Injects the specified number of silence samples.\n *\n * This function, along with swr_drop_output(), is called by swr_next_pts()\n * if needed for \"hard\" compensation.\n *\n * @param s     allocated Swr context\n * @param count number of samples to be injected\n *\n * @return >= 0 on success, or a negative AVERROR code on failure\n */\nint swr_inject_silence(struct SwrContext *s, int count);\n\n/**\n * Gets the delay the next input sample will experience relative to the next output sample.\n *\n * Swresample can buffer data if more input has been provided than available\n * output space, also converting between sample rates needs a delay.\n * This function returns the sum of all such delays.\n * The exact delay is not necessarily an integer value in either input or\n * output sample rate. 
Especially when downsampling by a large value, the\n * output sample rate may be a poor choice to represent the delay, similarly\n * for upsampling and the input sample rate.\n *\n * @param s     swr context\n * @param base  timebase in which the returned delay will be:\n *              @li if it's set to 1 the returned delay is in seconds\n *              @li if it's set to 1000 the returned delay is in milliseconds\n *              @li if it's set to the input sample rate then the returned\n *                  delay is in input samples\n *              @li if it's set to the output sample rate then the returned\n *                  delay is in output samples\n *              @li if it's the least common multiple of in_sample_rate and\n *                  out_sample_rate then an exact rounding-free delay will be\n *                  returned\n * @returns     the delay in 1 / @c base units.\n */\nint64_t swr_get_delay(struct SwrContext *s, int64_t base);\n\n/**\n * @}\n *\n * @name Configuration accessors\n * @{\n */\n\n/**\n * Return the @ref LIBSWRESAMPLE_VERSION_INT constant.\n *\n * This is useful to check if the build-time libswresample has the same version\n * as the run-time one.\n *\n * @returns     the unsigned int-typed version\n */\nunsigned swresample_version(void);\n\n/**\n * Return the swr build-time configuration.\n *\n * @returns     the build-time @c ./configure flags\n */\nconst char *swresample_configuration(void);\n\n/**\n * Return the swr license.\n *\n * @returns     the license of libswresample, determined at build-time\n */\nconst char *swresample_license(void);\n\n/**\n * @}\n *\n * @name AVFrame based API\n * @{\n */\n\n/**\n * Convert the samples in the input AVFrame and write them to the output AVFrame.\n *\n * Input and output AVFrames must have channel_layout, sample_rate and format set.\n *\n * If the output AVFrame does not have the data pointers allocated the nb_samples\n * field will be set using av_frame_get_buffer()\n * is called 
to allocate the frame.\n *\n * The output AVFrame can be NULL or have fewer allocated samples than required.\n * In this case, any remaining samples not written to the output will be added\n * to an internal FIFO buffer, to be returned at the next call to this function\n * or to swr_convert().\n *\n * If converting sample rate, there may be data remaining in the internal\n * resampling delay buffer. swr_get_delay() tells the number of\n * remaining samples. To get this data as output, call this function or\n * swr_convert() with NULL input.\n *\n * If the SwrContext configuration does not match the output and\n * input AVFrame settings the conversion does not take place and depending on\n * which AVFrame is not matching AVERROR_OUTPUT_CHANGED, AVERROR_INPUT_CHANGED\n * or the result of a bitwise-OR of them is returned.\n *\n * @see swr_delay()\n * @see swr_convert()\n * @see swr_get_delay()\n *\n * @param swr             audio resample context\n * @param output          output AVFrame\n * @param input           input AVFrame\n * @return                0 on success, AVERROR on failure or nonmatching\n *                        configuration.\n */\nint swr_convert_frame(SwrContext *swr,\n                      AVFrame *output, const AVFrame *input);\n\n/**\n * Configure or reconfigure the SwrContext using the information\n * provided by the AVFrames.\n *\n * The original resampling context is reset even on failure.\n * The function calls swr_close() internally if the context is open.\n *\n * @see swr_close();\n *\n * @param swr             audio resample context\n * @param output          output AVFrame\n * @param input           input AVFrame\n * @return                0 on success, AVERROR on failure.\n */\nint swr_config_frame(SwrContext *swr, const AVFrame *out, const AVFrame *in);\n\n/**\n * @}\n * @}\n */\n\n#endif /* SWRESAMPLE_SWRESAMPLE_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libswresample/version.h",
    "content": "/*\n * Version macros.\n *\n * This file is part of libswresample\n *\n * libswresample is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * libswresample is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with libswresample; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef SWR_VERSION_H\n#define SWR_VERSION_H\n\n/**\n * @file\n * Libswresample version macros\n */\n\n#include \"libavutil/avutil.h\"\n\n#define LIBSWRESAMPLE_VERSION_MAJOR   1\n#define LIBSWRESAMPLE_VERSION_MINOR   1\n#define LIBSWRESAMPLE_VERSION_MICRO 100\n\n#define LIBSWRESAMPLE_VERSION_INT  AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \\\n                                                  LIBSWRESAMPLE_VERSION_MINOR, \\\n                                                  LIBSWRESAMPLE_VERSION_MICRO)\n#define LIBSWRESAMPLE_VERSION      AV_VERSION(LIBSWRESAMPLE_VERSION_MAJOR, \\\n                                              LIBSWRESAMPLE_VERSION_MINOR, \\\n                                              LIBSWRESAMPLE_VERSION_MICRO)\n#define LIBSWRESAMPLE_BUILD        LIBSWRESAMPLE_VERSION_INT\n\n#define LIBSWRESAMPLE_IDENT        \"SwR\" AV_STRINGIFY(LIBSWRESAMPLE_VERSION)\n\n#endif /* SWR_VERSION_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libswscale/swscale.h",
    "content": "/*\n * Copyright (C) 2001-2011 Michael Niedermayer <michaelni@gmx.at>\n *\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef SWSCALE_SWSCALE_H\n#define SWSCALE_SWSCALE_H\n\n/**\n * @file\n * @ingroup libsws\n * external API header\n */\n\n#include <stdint.h>\n\n#include \"libavutil/avutil.h\"\n#include \"libavutil/log.h\"\n#include \"libavutil/pixfmt.h\"\n#include \"version.h\"\n\n/**\n * @defgroup libsws Color conversion and scaling\n * @{\n *\n * Return the LIBSWSCALE_VERSION_INT constant.\n */\nunsigned swscale_version(void);\n\n/**\n * Return the libswscale build-time configuration.\n */\nconst char *swscale_configuration(void);\n\n/**\n * Return the libswscale license.\n */\nconst char *swscale_license(void);\n\n/* values for the flags, the stuff on the command line is different */\n#define SWS_FAST_BILINEAR     1\n#define SWS_BILINEAR          2\n#define SWS_BICUBIC           4\n#define SWS_X                 8\n#define SWS_POINT          0x10\n#define SWS_AREA           0x20\n#define SWS_BICUBLIN       0x40\n#define SWS_GAUSS          0x80\n#define SWS_SINC          0x100\n#define SWS_LANCZOS       0x200\n#define SWS_SPLINE        0x400\n\n#define SWS_SRC_V_CHR_DROP_MASK     0x30000\n#define SWS_SRC_V_CHR_DROP_SHIFT    
16\n\n#define SWS_PARAM_DEFAULT           123456\n\n#define SWS_PRINT_INFO              0x1000\n\n//the following 3 flags are not completely implemented\n//internal chrominance subsampling info\n#define SWS_FULL_CHR_H_INT    0x2000\n//input subsampling info\n#define SWS_FULL_CHR_H_INP    0x4000\n#define SWS_DIRECT_BGR        0x8000\n#define SWS_ACCURATE_RND      0x40000\n#define SWS_BITEXACT          0x80000\n#define SWS_ERROR_DIFFUSION  0x800000\n\n#if FF_API_SWS_CPU_CAPS\n/**\n * CPU caps are autodetected now, those flags\n * are only provided for API compatibility.\n */\n#define SWS_CPU_CAPS_MMX      0x80000000\n#define SWS_CPU_CAPS_MMXEXT   0x20000000\n#define SWS_CPU_CAPS_MMX2     0x20000000\n#define SWS_CPU_CAPS_3DNOW    0x40000000\n#define SWS_CPU_CAPS_ALTIVEC  0x10000000\n#if FF_API_ARCH_BFIN\n#define SWS_CPU_CAPS_BFIN     0x01000000\n#endif\n#define SWS_CPU_CAPS_SSE2     0x02000000\n#endif\n\n#define SWS_MAX_REDUCE_CUTOFF 0.002\n\n#define SWS_CS_ITU709         1\n#define SWS_CS_FCC            4\n#define SWS_CS_ITU601         5\n#define SWS_CS_ITU624         5\n#define SWS_CS_SMPTE170M      5\n#define SWS_CS_SMPTE240M      7\n#define SWS_CS_DEFAULT        5\n\n/**\n * Return a pointer to yuv<->rgb coefficients for the given colorspace\n * suitable for sws_setColorspaceDetails().\n *\n * @param colorspace One of the SWS_CS_* macros. 
If invalid,\n * SWS_CS_DEFAULT is used.\n */\nconst int *sws_getCoefficients(int colorspace);\n\n// when used for filters they must have an odd number of elements\n// coeffs cannot be shared between vectors\ntypedef struct SwsVector {\n    double *coeff;              ///< pointer to the list of coefficients\n    int length;                 ///< number of coefficients in the vector\n} SwsVector;\n\n// vectors can be shared\ntypedef struct SwsFilter {\n    SwsVector *lumH;\n    SwsVector *lumV;\n    SwsVector *chrH;\n    SwsVector *chrV;\n} SwsFilter;\n\nstruct SwsContext;\n\n/**\n * Return a positive value if pix_fmt is a supported input format, 0\n * otherwise.\n */\nint sws_isSupportedInput(enum AVPixelFormat pix_fmt);\n\n/**\n * Return a positive value if pix_fmt is a supported output format, 0\n * otherwise.\n */\nint sws_isSupportedOutput(enum AVPixelFormat pix_fmt);\n\n/**\n * @param[in]  pix_fmt the pixel format\n * @return a positive value if an endianness conversion for pix_fmt is\n * supported, 0 otherwise.\n */\nint sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt);\n\n/**\n * Allocate an empty SwsContext. This must be filled and passed to\n * sws_init_context(). For filling see AVOptions, options.c and\n * sws_setColorspaceDetails().\n */\nstruct SwsContext *sws_alloc_context(void);\n\n/**\n * Initialize the swscaler context sws_context.\n *\n * @return zero or positive value on success, a negative value on\n * error\n */\nint sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter);\n\n/**\n * Free the swscaler context swsContext.\n * If swsContext is NULL, then does nothing.\n */\nvoid sws_freeContext(struct SwsContext *swsContext);\n\n/**\n * Allocate and return an SwsContext. 
You need it to perform\n * scaling/conversion operations using sws_scale().\n *\n * @param srcW the width of the source image\n * @param srcH the height of the source image\n * @param srcFormat the source image format\n * @param dstW the width of the destination image\n * @param dstH the height of the destination image\n * @param dstFormat the destination image format\n * @param flags specify which algorithm and options to use for rescaling\n * @return a pointer to an allocated context, or NULL in case of error\n * @note this function is to be removed after a saner alternative is\n *       written\n */\nstruct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat,\n                                  int dstW, int dstH, enum AVPixelFormat dstFormat,\n                                  int flags, SwsFilter *srcFilter,\n                                  SwsFilter *dstFilter, const double *param);\n\n/**\n * Scale the image slice in srcSlice and put the resulting scaled\n * slice in the image in dst. A slice is a sequence of consecutive\n * rows in an image.\n *\n * Slices have to be provided in sequential order, either in\n * top-bottom or bottom-top order. 
If slices are provided in\n * non-sequential order the behavior of the function is undefined.\n *\n * @param c         the scaling context previously created with\n *                  sws_getContext()\n * @param srcSlice  the array containing the pointers to the planes of\n *                  the source slice\n * @param srcStride the array containing the strides for each plane of\n *                  the source image\n * @param srcSliceY the position in the source image of the slice to\n *                  process, that is the number (counted starting from\n *                  zero) in the image of the first row of the slice\n * @param srcSliceH the height of the source slice, that is the number\n *                  of rows in the slice\n * @param dst       the array containing the pointers to the planes of\n *                  the destination image\n * @param dstStride the array containing the strides for each plane of\n *                  the destination image\n * @return          the height of the output slice\n */\nint sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[],\n              const int srcStride[], int srcSliceY, int srcSliceH,\n              uint8_t *const dst[], const int dstStride[]);\n\n/**\n * @param dstRange flag indicating the while-black range of the output (1=jpeg / 0=mpeg)\n * @param srcRange flag indicating the while-black range of the input (1=jpeg / 0=mpeg)\n * @param table the yuv2rgb coefficients describing the output yuv space, normally ff_yuv2rgb_coeffs[x]\n * @param inv_table the yuv2rgb coefficients describing the input yuv space, normally ff_yuv2rgb_coeffs[x]\n * @param brightness 16.16 fixed point brightness correction\n * @param contrast 16.16 fixed point contrast correction\n * @param saturation 16.16 fixed point saturation correction\n * @return -1 if not supported\n */\nint sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4],\n                             int srcRange, const int table[4], int 
dstRange,\n                             int brightness, int contrast, int saturation);\n\n/**\n * @return -1 if not supported\n */\nint sws_getColorspaceDetails(struct SwsContext *c, int **inv_table,\n                             int *srcRange, int **table, int *dstRange,\n                             int *brightness, int *contrast, int *saturation);\n\n/**\n * Allocate and return an uninitialized vector with length coefficients.\n */\nSwsVector *sws_allocVec(int length);\n\n/**\n * Return a normalized Gaussian curve used to filter stuff\n * quality = 3 is high quality, lower is lower quality.\n */\nSwsVector *sws_getGaussianVec(double variance, double quality);\n\n/**\n * Allocate and return a vector with length coefficients, all\n * with the same value c.\n */\nSwsVector *sws_getConstVec(double c, int length);\n\n/**\n * Allocate and return a vector with just one coefficient, with\n * value 1.0.\n */\nSwsVector *sws_getIdentityVec(void);\n\n/**\n * Scale all the coefficients of a by the scalar value.\n */\nvoid sws_scaleVec(SwsVector *a, double scalar);\n\n/**\n * Scale all the coefficients of a so that their sum equals height.\n */\nvoid sws_normalizeVec(SwsVector *a, double height);\nvoid sws_convVec(SwsVector *a, SwsVector *b);\nvoid sws_addVec(SwsVector *a, SwsVector *b);\nvoid sws_subVec(SwsVector *a, SwsVector *b);\nvoid sws_shiftVec(SwsVector *a, int shift);\n\n/**\n * Allocate and return a clone of the vector a, that is a vector\n * with the same coefficients as a.\n */\nSwsVector *sws_cloneVec(SwsVector *a);\n\n/**\n * Print with av_log() a textual representation of the vector a\n * if log_level <= av_log_level.\n */\nvoid sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level);\n\nvoid sws_freeVec(SwsVector *a);\n\nSwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,\n                                float lumaSharpen, float chromaSharpen,\n                                float chromaHShift, float chromaVShift,\n                       
         int verbose);\nvoid sws_freeFilter(SwsFilter *filter);\n\n/**\n * Check if context can be reused, otherwise reallocate a new one.\n *\n * If context is NULL, just calls sws_getContext() to get a new\n * context. Otherwise, checks if the parameters are the ones already\n * saved in context. If that is the case, returns the current\n * context. Otherwise, frees context and gets a new context with\n * the new parameters.\n *\n * Be warned that srcFilter and dstFilter are not checked, they\n * are assumed to remain the same.\n */\nstruct SwsContext *sws_getCachedContext(struct SwsContext *context,\n                                        int srcW, int srcH, enum AVPixelFormat srcFormat,\n                                        int dstW, int dstH, enum AVPixelFormat dstFormat,\n                                        int flags, SwsFilter *srcFilter,\n                                        SwsFilter *dstFilter, const double *param);\n\n/**\n * Convert an 8-bit paletted frame into a frame with a color depth of 32 bits.\n *\n * The output frame will have the same packed format as the palette.\n *\n * @param src        source frame buffer\n * @param dst        destination frame buffer\n * @param num_pixels number of pixels to convert\n * @param palette    array with [256] entries, which must match color arrangement (RGB or BGR) of src\n */\nvoid sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette);\n\n/**\n * Convert an 8-bit paletted frame into a frame with a color depth of 24 bits.\n *\n * With the palette format \"ABCD\", the destination frame ends up with the format \"ABC\".\n *\n * @param src        source frame buffer\n * @param dst        destination frame buffer\n * @param num_pixels number of pixels to convert\n * @param palette    array with [256] entries, which must match color arrangement (RGB or BGR) of src\n */\nvoid sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels, 
const uint8_t *palette);\n\n/**\n * Get the AVClass for swsContext. It can be used in combination with\n * AV_OPT_SEARCH_FAKE_OBJ for examining options.\n *\n * @see av_opt_find().\n */\nconst AVClass *sws_get_class(void);\n\n/**\n * @}\n */\n\n#endif /* SWSCALE_SWSCALE_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/FFMpegiOS/include/libswscale/version.h",
    "content": "/*\n * This file is part of FFmpeg.\n *\n * FFmpeg is free software; you can redistribute it and/or\n * modify it under the terms of the GNU Lesser General Public\n * License as published by the Free Software Foundation; either\n * version 2.1 of the License, or (at your option) any later version.\n *\n * FFmpeg is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU\n * Lesser General Public License for more details.\n *\n * You should have received a copy of the GNU Lesser General Public\n * License along with FFmpeg; if not, write to the Free Software\n * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n */\n\n#ifndef SWSCALE_VERSION_H\n#define SWSCALE_VERSION_H\n\n/**\n * @file\n * swscale version macros\n */\n\n#include \"libavutil/version.h\"\n\n#define LIBSWSCALE_VERSION_MAJOR 3\n#define LIBSWSCALE_VERSION_MINOR 1\n#define LIBSWSCALE_VERSION_MICRO 101\n\n#define LIBSWSCALE_VERSION_INT  AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \\\n                                               LIBSWSCALE_VERSION_MINOR, \\\n                                               LIBSWSCALE_VERSION_MICRO)\n#define LIBSWSCALE_VERSION      AV_VERSION(LIBSWSCALE_VERSION_MAJOR, \\\n                                           LIBSWSCALE_VERSION_MINOR, \\\n                                           LIBSWSCALE_VERSION_MICRO)\n#define LIBSWSCALE_BUILD        LIBSWSCALE_VERSION_INT\n\n#define LIBSWSCALE_IDENT        \"SwS\" AV_STRINGIFY(LIBSWSCALE_VERSION)\n\n/**\n * FF_API_* defines may be placed below to indicate public API that will be\n * dropped at a future version bump. 
The defines themselves are not part of\n * the public API and may change, break or disappear at any time.\n */\n\n#ifndef FF_API_SWS_CPU_CAPS\n#define FF_API_SWS_CPU_CAPS    (LIBSWSCALE_VERSION_MAJOR < 4)\n#endif\n#ifndef FF_API_ARCH_BFIN\n#define FF_API_ARCH_BFIN       (LIBSWSCALE_VERSION_MAJOR < 4)\n#endif\n\n#endif /* SWSCALE_VERSION_H */\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/en.lproj/DFUViewController_iPad.xib",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<archive type=\"com.apple.InterfaceBuilder3.CocoaTouch.iPad.XIB\" version=\"8.00\">\n\t<data>\n\t\t<int key=\"IBDocument.SystemTarget\">1552</int>\n\t\t<string key=\"IBDocument.SystemVersion\">12C60</string>\n\t\t<string key=\"IBDocument.InterfaceBuilderVersion\">3084</string>\n\t\t<string key=\"IBDocument.AppKitVersion\">1187.34</string>\n\t\t<string key=\"IBDocument.HIToolboxVersion\">625.00</string>\n\t\t<object class=\"NSMutableDictionary\" key=\"IBDocument.PluginVersions\">\n\t\t\t<string key=\"NS.key.0\">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t\t<string key=\"NS.object.0\">2083</string>\n\t\t</object>\n\t\t<array key=\"IBDocument.IntegratedClassDependencies\">\n\t\t\t<string>IBProxyObject</string>\n\t\t\t<string>IBUIButton</string>\n\t\t\t<string>IBUIImageView</string>\n\t\t\t<string>IBUILabel</string>\n\t\t\t<string>IBUIView</string>\n\t\t</array>\n\t\t<array key=\"IBDocument.PluginDependencies\">\n\t\t\t<string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t</array>\n\t\t<object class=\"NSMutableDictionary\" key=\"IBDocument.Metadata\">\n\t\t\t<string key=\"NS.key.0\">PluginDependencyRecalculationVersion</string>\n\t\t\t<integer value=\"1\" key=\"NS.object.0\"/>\n\t\t</object>\n\t\t<array class=\"NSMutableArray\" key=\"IBDocument.RootObjects\" id=\"1000\">\n\t\t\t<object class=\"IBProxyObject\" id=\"841351856\">\n\t\t\t\t<string key=\"IBProxiedObjectIdentifier\">IBFilesOwner</string>\n\t\t\t\t<string key=\"targetRuntimeIdentifier\">IBIPadFramework</string>\n\t\t\t</object>\n\t\t\t<object class=\"IBProxyObject\" id=\"606714003\">\n\t\t\t\t<string key=\"IBProxiedObjectIdentifier\">IBFirstResponder</string>\n\t\t\t\t<string key=\"targetRuntimeIdentifier\">IBIPadFramework</string>\n\t\t\t</object>\n\t\t\t<object class=\"IBUIView\" id=\"766721923\">\n\t\t\t\t<reference key=\"NSNextResponder\"/>\n\t\t\t\t<int key=\"NSvFlags\">274</int>\n\t\t\t\t<array 
class=\"NSMutableArray\" key=\"NSSubviews\">\n\t\t\t\t\t<object class=\"IBUIImageView\" id=\"299534821\">\n\t\t\t\t\t\t<reference key=\"NSNextResponder\" ref=\"766721923\"/>\n\t\t\t\t\t\t<int key=\"NSvFlags\">274</int>\n\t\t\t\t\t\t<string key=\"NSFrameSize\">{768, 1004}</string>\n\t\t\t\t\t\t<reference key=\"NSSuperview\" ref=\"766721923\"/>\n\t\t\t\t\t\t<reference key=\"NSWindow\"/>\n\t\t\t\t\t\t<reference key=\"NSNextKeyView\" ref=\"931378226\"/>\n\t\t\t\t\t\t<bool key=\"IBUIOpaque\">NO</bool>\n\t\t\t\t\t\t<bool key=\"IBUIClearsContextBeforeDrawing\">NO</bool>\n\t\t\t\t\t\t<int key=\"IBUIContentMode\">4</int>\n\t\t\t\t\t\t<bool key=\"IBUIUserInteractionEnabled\">NO</bool>\n\t\t\t\t\t\t<string key=\"targetRuntimeIdentifier\">IBIPadFramework</string>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBUIButton\" id=\"931378226\">\n\t\t\t\t\t\t<reference key=\"NSNextResponder\" ref=\"766721923\"/>\n\t\t\t\t\t\t<int key=\"NSvFlags\">292</int>\n\t\t\t\t\t\t<string key=\"NSFrame\">{{244, 950}, {58, 35}}</string>\n\t\t\t\t\t\t<reference key=\"NSSuperview\" ref=\"766721923\"/>\n\t\t\t\t\t\t<reference key=\"NSWindow\"/>\n\t\t\t\t\t\t<reference key=\"NSNextKeyView\" ref=\"137139674\"/>\n\t\t\t\t\t\t<bool key=\"IBUIOpaque\">NO</bool>\n\t\t\t\t\t\t<bool key=\"IBUIClearsContextBeforeDrawing\">NO</bool>\n\t\t\t\t\t\t<string key=\"targetRuntimeIdentifier\">IBIPadFramework</string>\n\t\t\t\t\t\t<int key=\"IBUIContentHorizontalAlignment\">0</int>\n\t\t\t\t\t\t<int key=\"IBUIContentVerticalAlignment\">0</int>\n\t\t\t\t\t\t<int key=\"IBUIButtonType\">1</int>\n\t\t\t\t\t\t<string key=\"IBUINormalTitle\">play</string>\n\t\t\t\t\t\t<object class=\"NSColor\" key=\"IBUIHighlightedTitleColor\" id=\"884334884\">\n\t\t\t\t\t\t\t<int key=\"NSColorSpace\">3</int>\n\t\t\t\t\t\t\t<bytes key=\"NSWhite\">MQA</bytes>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<object class=\"NSColor\" key=\"IBUINormalTitleColor\">\n\t\t\t\t\t\t\t<int key=\"NSColorSpace\">1</int>\n\t\t\t\t\t\t\t<bytes 
key=\"NSRGB\">MC4xOTYwNzg0MyAwLjMwOTgwMzkzIDAuNTIxNTY4NjYAA</bytes>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<object class=\"NSColor\" key=\"IBUINormalTitleShadowColor\" id=\"313226797\">\n\t\t\t\t\t\t\t<int key=\"NSColorSpace\">3</int>\n\t\t\t\t\t\t\t<bytes key=\"NSWhite\">MC41AA</bytes>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<object class=\"IBUIFontDescription\" key=\"IBUIFontDescription\">\n\t\t\t\t\t\t\t<string key=\"name\">Helvetica-Bold</string>\n\t\t\t\t\t\t\t<string key=\"family\">Helvetica</string>\n\t\t\t\t\t\t\t<int key=\"traits\">2</int>\n\t\t\t\t\t\t\t<double key=\"pointSize\">15</double>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<object class=\"NSFont\" key=\"IBUIFont\" id=\"503068248\">\n\t\t\t\t\t\t\t<string key=\"NSName\">Helvetica-Bold</string>\n\t\t\t\t\t\t\t<double key=\"NSSize\">15</double>\n\t\t\t\t\t\t\t<int key=\"NSfFlags\">16</int>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBUIButton\" id=\"137139674\">\n\t\t\t\t\t\t<reference key=\"NSNextResponder\" ref=\"766721923\"/>\n\t\t\t\t\t\t<int key=\"NSvFlags\">292</int>\n\t\t\t\t\t\t<string key=\"NSFrame\">{{310, 950}, {58, 35}}</string>\n\t\t\t\t\t\t<reference key=\"NSSuperview\" ref=\"766721923\"/>\n\t\t\t\t\t\t<reference key=\"NSWindow\"/>\n\t\t\t\t\t\t<reference key=\"NSNextKeyView\" ref=\"203300034\"/>\n\t\t\t\t\t\t<string key=\"NSReuseIdentifierKey\">_NS:9</string>\n\t\t\t\t\t\t<bool key=\"IBUIOpaque\">NO</bool>\n\t\t\t\t\t\t<string key=\"targetRuntimeIdentifier\">IBIPadFramework</string>\n\t\t\t\t\t\t<int key=\"IBUIContentHorizontalAlignment\">0</int>\n\t\t\t\t\t\t<int key=\"IBUIContentVerticalAlignment\">0</int>\n\t\t\t\t\t\t<int key=\"IBUIButtonType\">1</int>\n\t\t\t\t\t\t<string key=\"IBUINormalTitle\">time</string>\n\t\t\t\t\t\t<reference key=\"IBUIHighlightedTitleColor\" ref=\"884334884\"/>\n\t\t\t\t\t\t<object class=\"NSColor\" key=\"IBUINormalTitleColor\">\n\t\t\t\t\t\t\t<int key=\"NSColorSpace\">1</int>\n\t\t\t\t\t\t\t<bytes 
key=\"NSRGB\">MC4xOTYwNzg0MzQ2IDAuMzA5ODAzOTMyOSAwLjUyMTU2ODY1NgA</bytes>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<reference key=\"IBUINormalTitleShadowColor\" ref=\"313226797\"/>\n\t\t\t\t\t\t<object class=\"IBUIFontDescription\" key=\"IBUIFontDescription\">\n\t\t\t\t\t\t\t<int key=\"type\">2</int>\n\t\t\t\t\t\t\t<double key=\"pointSize\">15</double>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<reference key=\"IBUIFont\" ref=\"503068248\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBUILabel\" id=\"203300034\">\n\t\t\t\t\t\t<reference key=\"NSNextResponder\" ref=\"766721923\"/>\n\t\t\t\t\t\t<int key=\"NSvFlags\">292</int>\n\t\t\t\t\t\t<string key=\"NSFrame\">{{482, 955}, {42, 21}}</string>\n\t\t\t\t\t\t<reference key=\"NSSuperview\" ref=\"766721923\"/>\n\t\t\t\t\t\t<reference key=\"NSWindow\"/>\n\t\t\t\t\t\t<bool key=\"IBUIOpaque\">NO</bool>\n\t\t\t\t\t\t<bool key=\"IBUIClipsSubviews\">YES</bool>\n\t\t\t\t\t\t<bool key=\"IBUIUserInteractionEnabled\">NO</bool>\n\t\t\t\t\t\t<string key=\"targetRuntimeIdentifier\">IBIPadFramework</string>\n\t\t\t\t\t\t<string key=\"IBUIText\">fps</string>\n\t\t\t\t\t\t<object class=\"NSColor\" key=\"IBUITextColor\">\n\t\t\t\t\t\t\t<int key=\"NSColorSpace\">1</int>\n\t\t\t\t\t\t\t<bytes key=\"NSRGB\">MCAwIDAAA</bytes>\n\t\t\t\t\t\t\t<string key=\"IBUIColorCocoaTouchKeyPath\">darkTextColor</string>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<nil key=\"IBUIHighlightedColor\"/>\n\t\t\t\t\t\t<int key=\"IBUIBaselineAdjustment\">1</int>\n\t\t\t\t\t\t<float key=\"IBUIMinimumFontSize\">10</float>\n\t\t\t\t\t\t<int key=\"IBUITextAlignment\">1</int>\n\t\t\t\t\t\t<object class=\"IBUIFontDescription\" key=\"IBUIFontDescription\">\n\t\t\t\t\t\t\t<int key=\"type\">1</int>\n\t\t\t\t\t\t\t<double key=\"pointSize\">17</double>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<object class=\"NSFont\" key=\"IBUIFont\">\n\t\t\t\t\t\t\t<string key=\"NSName\">Helvetica</string>\n\t\t\t\t\t\t\t<double key=\"NSSize\">17</double>\n\t\t\t\t\t\t\t<int 
key=\"NSfFlags\">16</int>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t</object>\n\t\t\t\t</array>\n\t\t\t\t<string key=\"NSFrame\">{{0, 20}, {768, 1004}}</string>\n\t\t\t\t<reference key=\"NSSuperview\"/>\n\t\t\t\t<reference key=\"NSWindow\"/>\n\t\t\t\t<object class=\"NSColor\" key=\"IBUIBackgroundColor\">\n\t\t\t\t\t<int key=\"NSColorSpace\">3</int>\n\t\t\t\t\t<bytes key=\"NSWhite\">MQA</bytes>\n\t\t\t\t\t<object class=\"NSColorSpace\" key=\"NSCustomColorSpace\">\n\t\t\t\t\t\t<int key=\"NSID\">2</int>\n\t\t\t\t\t</object>\n\t\t\t\t</object>\n\t\t\t\t<object class=\"IBUISimulatedStatusBarMetrics\" key=\"IBUISimulatedStatusBarMetrics\">\n\t\t\t\t\t<int key=\"IBUIStatusBarStyle\">2</int>\n\t\t\t\t</object>\n\t\t\t\t<string key=\"targetRuntimeIdentifier\">IBIPadFramework</string>\n\t\t\t</object>\n\t\t</array>\n\t\t<object class=\"IBObjectContainer\" key=\"IBDocument.Objects\">\n\t\t\t<array class=\"NSMutableArray\" key=\"connectionRecords\">\n\t\t\t\t<object class=\"IBConnectionRecord\">\n\t\t\t\t\t<object class=\"IBCocoaTouchOutletConnection\" key=\"connection\">\n\t\t\t\t\t\t<string key=\"label\">view</string>\n\t\t\t\t\t\t<reference key=\"source\" ref=\"841351856\"/>\n\t\t\t\t\t\t<reference key=\"destination\" ref=\"766721923\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<int key=\"connectionID\">3</int>\n\t\t\t\t</object>\n\t\t\t\t<object class=\"IBConnectionRecord\">\n\t\t\t\t\t<object class=\"IBCocoaTouchOutletConnection\" key=\"connection\">\n\t\t\t\t\t\t<string key=\"label\">imageView</string>\n\t\t\t\t\t\t<reference key=\"source\" ref=\"841351856\"/>\n\t\t\t\t\t\t<reference key=\"destination\" ref=\"299534821\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<int key=\"connectionID\">24</int>\n\t\t\t\t</object>\n\t\t\t\t<object class=\"IBConnectionRecord\">\n\t\t\t\t\t<object class=\"IBCocoaTouchOutletConnection\" key=\"connection\">\n\t\t\t\t\t\t<string key=\"label\">playButton</string>\n\t\t\t\t\t\t<reference key=\"source\" ref=\"841351856\"/>\n\t\t\t\t\t\t<reference key=\"destination\" 
ref=\"931378226\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<int key=\"connectionID\">25</int>\n\t\t\t\t</object>\n\t\t\t\t<object class=\"IBConnectionRecord\">\n\t\t\t\t\t<object class=\"IBCocoaTouchOutletConnection\" key=\"connection\">\n\t\t\t\t\t\t<string key=\"label\">label</string>\n\t\t\t\t\t\t<reference key=\"source\" ref=\"841351856\"/>\n\t\t\t\t\t\t<reference key=\"destination\" ref=\"203300034\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<int key=\"connectionID\">26</int>\n\t\t\t\t</object>\n\t\t\t\t<object class=\"IBConnectionRecord\">\n\t\t\t\t\t<object class=\"IBCocoaTouchEventConnection\" key=\"connection\">\n\t\t\t\t\t\t<string key=\"label\">playButtonAction:</string>\n\t\t\t\t\t\t<reference key=\"source\" ref=\"931378226\"/>\n\t\t\t\t\t\t<reference key=\"destination\" ref=\"841351856\"/>\n\t\t\t\t\t\t<int key=\"IBEventType\">7</int>\n\t\t\t\t\t</object>\n\t\t\t\t\t<int key=\"connectionID\">27</int>\n\t\t\t\t</object>\n\t\t\t\t<object class=\"IBConnectionRecord\">\n\t\t\t\t\t<object class=\"IBCocoaTouchEventConnection\" key=\"connection\">\n\t\t\t\t\t\t<string key=\"label\">showTime:</string>\n\t\t\t\t\t\t<reference key=\"source\" ref=\"137139674\"/>\n\t\t\t\t\t\t<reference key=\"destination\" ref=\"841351856\"/>\n\t\t\t\t\t\t<int key=\"IBEventType\">7</int>\n\t\t\t\t\t</object>\n\t\t\t\t\t<int key=\"connectionID\">28</int>\n\t\t\t\t</object>\n\t\t\t</array>\n\t\t\t<object class=\"IBMutableOrderedSet\" key=\"objectRecords\">\n\t\t\t\t<array key=\"orderedObjects\">\n\t\t\t\t\t<object class=\"IBObjectRecord\">\n\t\t\t\t\t\t<int key=\"objectID\">0</int>\n\t\t\t\t\t\t<array key=\"object\" id=\"0\"/>\n\t\t\t\t\t\t<reference key=\"children\" ref=\"1000\"/>\n\t\t\t\t\t\t<nil key=\"parent\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBObjectRecord\">\n\t\t\t\t\t\t<int key=\"objectID\">-1</int>\n\t\t\t\t\t\t<reference key=\"object\" ref=\"841351856\"/>\n\t\t\t\t\t\t<reference key=\"parent\" ref=\"0\"/>\n\t\t\t\t\t\t<string key=\"objectName\">File's 
Owner</string>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBObjectRecord\">\n\t\t\t\t\t\t<int key=\"objectID\">-2</int>\n\t\t\t\t\t\t<reference key=\"object\" ref=\"606714003\"/>\n\t\t\t\t\t\t<reference key=\"parent\" ref=\"0\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBObjectRecord\">\n\t\t\t\t\t\t<int key=\"objectID\">2</int>\n\t\t\t\t\t\t<reference key=\"object\" ref=\"766721923\"/>\n\t\t\t\t\t\t<array class=\"NSMutableArray\" key=\"children\">\n\t\t\t\t\t\t\t<reference ref=\"299534821\"/>\n\t\t\t\t\t\t\t<reference ref=\"931378226\"/>\n\t\t\t\t\t\t\t<reference ref=\"137139674\"/>\n\t\t\t\t\t\t\t<reference ref=\"203300034\"/>\n\t\t\t\t\t\t</array>\n\t\t\t\t\t\t<reference key=\"parent\" ref=\"0\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBObjectRecord\">\n\t\t\t\t\t\t<int key=\"objectID\">4</int>\n\t\t\t\t\t\t<reference key=\"object\" ref=\"931378226\"/>\n\t\t\t\t\t\t<array class=\"NSMutableArray\" key=\"children\"/>\n\t\t\t\t\t\t<reference key=\"parent\" ref=\"766721923\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBObjectRecord\">\n\t\t\t\t\t\t<int key=\"objectID\">5</int>\n\t\t\t\t\t\t<reference key=\"object\" ref=\"137139674\"/>\n\t\t\t\t\t\t<array class=\"NSMutableArray\" key=\"children\"/>\n\t\t\t\t\t\t<reference key=\"parent\" ref=\"766721923\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBObjectRecord\">\n\t\t\t\t\t\t<int key=\"objectID\">6</int>\n\t\t\t\t\t\t<reference key=\"object\" ref=\"299534821\"/>\n\t\t\t\t\t\t<array class=\"NSMutableArray\" key=\"children\"/>\n\t\t\t\t\t\t<reference key=\"parent\" ref=\"766721923\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBObjectRecord\">\n\t\t\t\t\t\t<int key=\"objectID\">7</int>\n\t\t\t\t\t\t<reference key=\"object\" ref=\"203300034\"/>\n\t\t\t\t\t\t<array class=\"NSMutableArray\" key=\"children\"/>\n\t\t\t\t\t\t<reference key=\"parent\" ref=\"766721923\"/>\n\t\t\t\t\t</object>\n\t\t\t\t</array>\n\t\t\t</object>\n\t\t\t<dictionary class=\"NSMutableDictionary\" 
key=\"flattenedProperties\">\n\t\t\t\t<string key=\"-1.CustomClassName\">DFUViewController</string>\n\t\t\t\t<string key=\"-1.IBPluginDependency\">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t\t\t<string key=\"-2.CustomClassName\">UIResponder</string>\n\t\t\t\t<string key=\"-2.IBPluginDependency\">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t\t\t<string key=\"2.IBPluginDependency\">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t\t\t<string key=\"4.IBPluginDependency\">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t\t\t<string key=\"5.IBPluginDependency\">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t\t\t<string key=\"6.IBPluginDependency\">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t\t\t<string key=\"7.IBPluginDependency\">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t\t</dictionary>\n\t\t\t<dictionary class=\"NSMutableDictionary\" key=\"unlocalizedProperties\"/>\n\t\t\t<nil key=\"activeLocalization\"/>\n\t\t\t<dictionary class=\"NSMutableDictionary\" key=\"localizations\"/>\n\t\t\t<nil key=\"sourceID\"/>\n\t\t\t<int key=\"maxID\">28</int>\n\t\t</object>\n\t\t<object class=\"IBClassDescriber\" key=\"IBDocument.Classes\">\n\t\t\t<array class=\"NSMutableArray\" key=\"referencedPartialClassDescriptions\">\n\t\t\t\t<object class=\"IBPartialClassDescription\">\n\t\t\t\t\t<string key=\"className\">DFUViewController</string>\n\t\t\t\t\t<string key=\"superclassName\">UIViewController</string>\n\t\t\t\t\t<dictionary class=\"NSMutableDictionary\" key=\"actions\">\n\t\t\t\t\t\t<string key=\"playButtonAction:\">id</string>\n\t\t\t\t\t\t<string key=\"showTime:\">id</string>\n\t\t\t\t\t</dictionary>\n\t\t\t\t\t<dictionary class=\"NSMutableDictionary\" key=\"actionInfosByName\">\n\t\t\t\t\t\t<object class=\"IBActionInfo\" key=\"playButtonAction:\">\n\t\t\t\t\t\t\t<string key=\"name\">playButtonAction:</string>\n\t\t\t\t\t\t\t<string 
key=\"candidateClassName\">id</string>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<object class=\"IBActionInfo\" key=\"showTime:\">\n\t\t\t\t\t\t\t<string key=\"name\">showTime:</string>\n\t\t\t\t\t\t\t<string key=\"candidateClassName\">id</string>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t</dictionary>\n\t\t\t\t\t<dictionary class=\"NSMutableDictionary\" key=\"outlets\">\n\t\t\t\t\t\t<string key=\"imageView\">UIImageView</string>\n\t\t\t\t\t\t<string key=\"label\">UILabel</string>\n\t\t\t\t\t\t<string key=\"playButton\">UIButton</string>\n\t\t\t\t\t</dictionary>\n\t\t\t\t\t<dictionary class=\"NSMutableDictionary\" key=\"toOneOutletInfosByName\">\n\t\t\t\t\t\t<object class=\"IBToOneOutletInfo\" key=\"imageView\">\n\t\t\t\t\t\t\t<string key=\"name\">imageView</string>\n\t\t\t\t\t\t\t<string key=\"candidateClassName\">UIImageView</string>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<object class=\"IBToOneOutletInfo\" key=\"label\">\n\t\t\t\t\t\t\t<string key=\"name\">label</string>\n\t\t\t\t\t\t\t<string key=\"candidateClassName\">UILabel</string>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<object class=\"IBToOneOutletInfo\" key=\"playButton\">\n\t\t\t\t\t\t\t<string key=\"name\">playButton</string>\n\t\t\t\t\t\t\t<string key=\"candidateClassName\">UIButton</string>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t</dictionary>\n\t\t\t\t\t<object class=\"IBClassDescriptionSource\" key=\"sourceIdentifier\">\n\t\t\t\t\t\t<string key=\"majorKey\">IBProjectSource</string>\n\t\t\t\t\t\t<string key=\"minorKey\">./Classes/DFUViewController.h</string>\n\t\t\t\t\t</object>\n\t\t\t\t</object>\n\t\t\t</array>\n\t\t</object>\n\t\t<int key=\"IBDocument.localizationMode\">0</int>\n\t\t<string key=\"IBDocument.TargetRuntimeIdentifier\">IBIPadFramework</string>\n\t\t<bool key=\"IBDocument.PluginDeclaredDependenciesTrackSystemTargetVersion\">YES</bool>\n\t\t<int key=\"IBDocument.defaultPropertyAccessControl\">3</int>\n\t\t<string key=\"IBCocoaTouchPluginVersion\">2083</string>\n\t</data>\n</archive>\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/en.lproj/DFUViewController_iPhone.xib",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<archive type=\"com.apple.InterfaceBuilder3.CocoaTouch.XIB\" version=\"8.00\">\n\t<data>\n\t\t<int key=\"IBDocument.SystemTarget\">1552</int>\n\t\t<string key=\"IBDocument.SystemVersion\">12C60</string>\n\t\t<string key=\"IBDocument.InterfaceBuilderVersion\">3084</string>\n\t\t<string key=\"IBDocument.AppKitVersion\">1187.34</string>\n\t\t<string key=\"IBDocument.HIToolboxVersion\">625.00</string>\n\t\t<object class=\"NSMutableDictionary\" key=\"IBDocument.PluginVersions\">\n\t\t\t<string key=\"NS.key.0\">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t\t<string key=\"NS.object.0\">2083</string>\n\t\t</object>\n\t\t<array key=\"IBDocument.IntegratedClassDependencies\">\n\t\t\t<string>IBProxyObject</string>\n\t\t\t<string>IBUIButton</string>\n\t\t\t<string>IBUIImageView</string>\n\t\t\t<string>IBUILabel</string>\n\t\t\t<string>IBUIView</string>\n\t\t</array>\n\t\t<array key=\"IBDocument.PluginDependencies\">\n\t\t\t<string>com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t</array>\n\t\t<object class=\"NSMutableDictionary\" key=\"IBDocument.Metadata\">\n\t\t\t<string key=\"NS.key.0\">PluginDependencyRecalculationVersion</string>\n\t\t\t<integer value=\"1\" key=\"NS.object.0\"/>\n\t\t</object>\n\t\t<array class=\"NSMutableArray\" key=\"IBDocument.RootObjects\" id=\"1000\">\n\t\t\t<object class=\"IBProxyObject\" id=\"372490531\">\n\t\t\t\t<string key=\"IBProxiedObjectIdentifier\">IBFilesOwner</string>\n\t\t\t\t<string key=\"targetRuntimeIdentifier\">IBCocoaTouchFramework</string>\n\t\t\t</object>\n\t\t\t<object class=\"IBProxyObject\" id=\"843779117\">\n\t\t\t\t<string key=\"IBProxiedObjectIdentifier\">IBFirstResponder</string>\n\t\t\t\t<string key=\"targetRuntimeIdentifier\">IBCocoaTouchFramework</string>\n\t\t\t</object>\n\t\t\t<object class=\"IBUIView\" id=\"774585933\">\n\t\t\t\t<reference key=\"NSNextResponder\"/>\n\t\t\t\t<int key=\"NSvFlags\">274</int>\n\t\t\t\t<array 
class=\"NSMutableArray\" key=\"NSSubviews\">\n\t\t\t\t\t<object class=\"IBUIImageView\" id=\"686962460\">\n\t\t\t\t\t\t<reference key=\"NSNextResponder\" ref=\"774585933\"/>\n\t\t\t\t\t\t<int key=\"NSvFlags\">274</int>\n\t\t\t\t\t\t<string key=\"NSFrameSize\">{320, 548}</string>\n\t\t\t\t\t\t<reference key=\"NSSuperview\" ref=\"774585933\"/>\n\t\t\t\t\t\t<reference key=\"NSWindow\"/>\n\t\t\t\t\t\t<reference key=\"NSNextKeyView\" ref=\"1003644215\"/>\n\t\t\t\t\t\t<bool key=\"IBUIOpaque\">NO</bool>\n\t\t\t\t\t\t<bool key=\"IBUIClearsContextBeforeDrawing\">NO</bool>\n\t\t\t\t\t\t<int key=\"IBUIContentMode\">4</int>\n\t\t\t\t\t\t<bool key=\"IBUIUserInteractionEnabled\">NO</bool>\n\t\t\t\t\t\t<string key=\"targetRuntimeIdentifier\">IBCocoaTouchFramework</string>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBUILabel\" id=\"941375600\">\n\t\t\t\t\t\t<reference key=\"NSNextResponder\" ref=\"774585933\"/>\n\t\t\t\t\t\t<int key=\"NSvFlags\">292</int>\n\t\t\t\t\t\t<string key=\"NSFrame\">{{258, 500}, {42, 21}}</string>\n\t\t\t\t\t\t<reference key=\"NSSuperview\" ref=\"774585933\"/>\n\t\t\t\t\t\t<reference key=\"NSWindow\"/>\n\t\t\t\t\t\t<bool key=\"IBUIOpaque\">NO</bool>\n\t\t\t\t\t\t<bool key=\"IBUIClipsSubviews\">YES</bool>\n\t\t\t\t\t\t<bool key=\"IBUIUserInteractionEnabled\">NO</bool>\n\t\t\t\t\t\t<string key=\"targetRuntimeIdentifier\">IBCocoaTouchFramework</string>\n\t\t\t\t\t\t<string key=\"IBUIText\">fps</string>\n\t\t\t\t\t\t<object class=\"NSColor\" key=\"IBUITextColor\">\n\t\t\t\t\t\t\t<int key=\"NSColorSpace\">1</int>\n\t\t\t\t\t\t\t<bytes key=\"NSRGB\">MCAwIDAAA</bytes>\n\t\t\t\t\t\t\t<string key=\"IBUIColorCocoaTouchKeyPath\">darkTextColor</string>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<nil key=\"IBUIHighlightedColor\"/>\n\t\t\t\t\t\t<int key=\"IBUIBaselineAdjustment\">1</int>\n\t\t\t\t\t\t<float key=\"IBUIMinimumFontSize\">10</float>\n\t\t\t\t\t\t<int key=\"IBUITextAlignment\">1</int>\n\t\t\t\t\t\t<object class=\"IBUIFontDescription\" 
key=\"IBUIFontDescription\">\n\t\t\t\t\t\t\t<int key=\"type\">1</int>\n\t\t\t\t\t\t\t<double key=\"pointSize\">17</double>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<object class=\"NSFont\" key=\"IBUIFont\">\n\t\t\t\t\t\t\t<string key=\"NSName\">Helvetica</string>\n\t\t\t\t\t\t\t<double key=\"NSSize\">17</double>\n\t\t\t\t\t\t\t<int key=\"NSfFlags\">16</int>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBUIButton\" id=\"497305065\">\n\t\t\t\t\t\t<reference key=\"NSNextResponder\" ref=\"774585933\"/>\n\t\t\t\t\t\t<int key=\"NSvFlags\">292</int>\n\t\t\t\t\t\t<string key=\"NSFrame\">{{86, 494}, {58, 35}}</string>\n\t\t\t\t\t\t<reference key=\"NSSuperview\" ref=\"774585933\"/>\n\t\t\t\t\t\t<reference key=\"NSWindow\"/>\n\t\t\t\t\t\t<reference key=\"NSNextKeyView\" ref=\"941375600\"/>\n\t\t\t\t\t\t<string key=\"NSReuseIdentifierKey\">_NS:9</string>\n\t\t\t\t\t\t<bool key=\"IBUIOpaque\">NO</bool>\n\t\t\t\t\t\t<string key=\"targetRuntimeIdentifier\">IBCocoaTouchFramework</string>\n\t\t\t\t\t\t<int key=\"IBUIContentHorizontalAlignment\">0</int>\n\t\t\t\t\t\t<int key=\"IBUIContentVerticalAlignment\">0</int>\n\t\t\t\t\t\t<int key=\"IBUIButtonType\">1</int>\n\t\t\t\t\t\t<string key=\"IBUINormalTitle\">time</string>\n\t\t\t\t\t\t<object class=\"NSColor\" key=\"IBUIHighlightedTitleColor\" id=\"1044723469\">\n\t\t\t\t\t\t\t<int key=\"NSColorSpace\">3</int>\n\t\t\t\t\t\t\t<bytes key=\"NSWhite\">MQA</bytes>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<object class=\"NSColor\" key=\"IBUINormalTitleColor\">\n\t\t\t\t\t\t\t<int key=\"NSColorSpace\">1</int>\n\t\t\t\t\t\t\t<bytes key=\"NSRGB\">MC4xOTYwNzg0MzQ2IDAuMzA5ODAzOTMyOSAwLjUyMTU2ODY1NgA</bytes>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<object class=\"NSColor\" key=\"IBUINormalTitleShadowColor\" id=\"106241316\">\n\t\t\t\t\t\t\t<int key=\"NSColorSpace\">3</int>\n\t\t\t\t\t\t\t<bytes key=\"NSWhite\">MC41AA</bytes>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<object class=\"IBUIFontDescription\" 
key=\"IBUIFontDescription\">\n\t\t\t\t\t\t\t<int key=\"type\">2</int>\n\t\t\t\t\t\t\t<double key=\"pointSize\">15</double>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<object class=\"NSFont\" key=\"IBUIFont\" id=\"382640342\">\n\t\t\t\t\t\t\t<string key=\"NSName\">Helvetica-Bold</string>\n\t\t\t\t\t\t\t<double key=\"NSSize\">15</double>\n\t\t\t\t\t\t\t<int key=\"NSfFlags\">16</int>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBUIButton\" id=\"1003644215\">\n\t\t\t\t\t\t<reference key=\"NSNextResponder\" ref=\"774585933\"/>\n\t\t\t\t\t\t<int key=\"NSvFlags\">292</int>\n\t\t\t\t\t\t<string key=\"NSFrame\">{{20, 494}, {58, 35}}</string>\n\t\t\t\t\t\t<reference key=\"NSSuperview\" ref=\"774585933\"/>\n\t\t\t\t\t\t<reference key=\"NSWindow\"/>\n\t\t\t\t\t\t<reference key=\"NSNextKeyView\" ref=\"497305065\"/>\n\t\t\t\t\t\t<bool key=\"IBUIOpaque\">NO</bool>\n\t\t\t\t\t\t<bool key=\"IBUIClearsContextBeforeDrawing\">NO</bool>\n\t\t\t\t\t\t<string key=\"targetRuntimeIdentifier\">IBCocoaTouchFramework</string>\n\t\t\t\t\t\t<int key=\"IBUIContentHorizontalAlignment\">0</int>\n\t\t\t\t\t\t<int key=\"IBUIContentVerticalAlignment\">0</int>\n\t\t\t\t\t\t<int key=\"IBUIButtonType\">1</int>\n\t\t\t\t\t\t<string key=\"IBUINormalTitle\">play</string>\n\t\t\t\t\t\t<reference key=\"IBUIHighlightedTitleColor\" ref=\"1044723469\"/>\n\t\t\t\t\t\t<object class=\"NSColor\" key=\"IBUINormalTitleColor\">\n\t\t\t\t\t\t\t<int key=\"NSColorSpace\">1</int>\n\t\t\t\t\t\t\t<bytes key=\"NSRGB\">MC4xOTYwNzg0MyAwLjMwOTgwMzkzIDAuNTIxNTY4NjYAA</bytes>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<reference key=\"IBUINormalTitleShadowColor\" ref=\"106241316\"/>\n\t\t\t\t\t\t<object class=\"IBUIFontDescription\" key=\"IBUIFontDescription\">\n\t\t\t\t\t\t\t<string key=\"name\">Helvetica-Bold</string>\n\t\t\t\t\t\t\t<string key=\"family\">Helvetica</string>\n\t\t\t\t\t\t\t<int key=\"traits\">2</int>\n\t\t\t\t\t\t\t<double key=\"pointSize\">15</double>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<reference 
key=\"IBUIFont\" ref=\"382640342\"/>\n\t\t\t\t\t</object>\n\t\t\t\t</array>\n\t\t\t\t<string key=\"NSFrame\">{{0, 20}, {320, 548}}</string>\n\t\t\t\t<reference key=\"NSSuperview\"/>\n\t\t\t\t<reference key=\"NSWindow\"/>\n\t\t\t\t<reference key=\"NSNextKeyView\" ref=\"686962460\"/>\n\t\t\t\t<object class=\"NSColor\" key=\"IBUIBackgroundColor\">\n\t\t\t\t\t<int key=\"NSColorSpace\">3</int>\n\t\t\t\t\t<bytes key=\"NSWhite\">MC43NQA</bytes>\n\t\t\t\t\t<object class=\"NSColorSpace\" key=\"NSCustomColorSpace\">\n\t\t\t\t\t\t<int key=\"NSID\">2</int>\n\t\t\t\t\t</object>\n\t\t\t\t</object>\n\t\t\t\t<bool key=\"IBUIClearsContextBeforeDrawing\">NO</bool>\n\t\t\t\t<object class=\"IBUISimulatedStatusBarMetrics\" key=\"IBUISimulatedStatusBarMetrics\"/>\n\t\t\t\t<object class=\"IBUIScreenMetrics\" key=\"IBUISimulatedDestinationMetrics\">\n\t\t\t\t\t<string key=\"IBUISimulatedSizeMetricsClass\">IBUIScreenMetrics</string>\n\t\t\t\t\t<object class=\"NSMutableDictionary\" key=\"IBUINormalizedOrientationToSizeMap\">\n\t\t\t\t\t\t<bool key=\"EncodedWithXMLCoder\">YES</bool>\n\t\t\t\t\t\t<array key=\"dict.sortedKeys\">\n\t\t\t\t\t\t\t<integer value=\"1\"/>\n\t\t\t\t\t\t\t<integer value=\"3\"/>\n\t\t\t\t\t\t</array>\n\t\t\t\t\t\t<array key=\"dict.values\">\n\t\t\t\t\t\t\t<string>{320, 568}</string>\n\t\t\t\t\t\t\t<string>{568, 320}</string>\n\t\t\t\t\t\t</array>\n\t\t\t\t\t</object>\n\t\t\t\t\t<string key=\"IBUITargetRuntime\">IBCocoaTouchFramework</string>\n\t\t\t\t\t<string key=\"IBUIDisplayName\">Retina 4 Full Screen</string>\n\t\t\t\t\t<int key=\"IBUIType\">2</int>\n\t\t\t\t</object>\n\t\t\t\t<string key=\"targetRuntimeIdentifier\">IBCocoaTouchFramework</string>\n\t\t\t</object>\n\t\t</array>\n\t\t<object class=\"IBObjectContainer\" key=\"IBDocument.Objects\">\n\t\t\t<array class=\"NSMutableArray\" key=\"connectionRecords\">\n\t\t\t\t<object class=\"IBConnectionRecord\">\n\t\t\t\t\t<object class=\"IBCocoaTouchOutletConnection\" key=\"connection\">\n\t\t\t\t\t\t<string 
key=\"label\">view</string>\n\t\t\t\t\t\t<reference key=\"source\" ref=\"372490531\"/>\n\t\t\t\t\t\t<reference key=\"destination\" ref=\"774585933\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<int key=\"connectionID\">7</int>\n\t\t\t\t</object>\n\t\t\t\t<object class=\"IBConnectionRecord\">\n\t\t\t\t\t<object class=\"IBCocoaTouchOutletConnection\" key=\"connection\">\n\t\t\t\t\t\t<string key=\"label\">label</string>\n\t\t\t\t\t\t<reference key=\"source\" ref=\"372490531\"/>\n\t\t\t\t\t\t<reference key=\"destination\" ref=\"941375600\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<int key=\"connectionID\">28</int>\n\t\t\t\t</object>\n\t\t\t\t<object class=\"IBConnectionRecord\">\n\t\t\t\t\t<object class=\"IBCocoaTouchOutletConnection\" key=\"connection\">\n\t\t\t\t\t\t<string key=\"label\">playButton</string>\n\t\t\t\t\t\t<reference key=\"source\" ref=\"372490531\"/>\n\t\t\t\t\t\t<reference key=\"destination\" ref=\"1003644215\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<int key=\"connectionID\">29</int>\n\t\t\t\t</object>\n\t\t\t\t<object class=\"IBConnectionRecord\">\n\t\t\t\t\t<object class=\"IBCocoaTouchOutletConnection\" key=\"connection\">\n\t\t\t\t\t\t<string key=\"label\">imageView</string>\n\t\t\t\t\t\t<reference key=\"source\" ref=\"372490531\"/>\n\t\t\t\t\t\t<reference key=\"destination\" ref=\"686962460\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<int key=\"connectionID\">30</int>\n\t\t\t\t</object>\n\t\t\t\t<object class=\"IBConnectionRecord\">\n\t\t\t\t\t<object class=\"IBCocoaTouchEventConnection\" key=\"connection\">\n\t\t\t\t\t\t<string key=\"label\">showTime:</string>\n\t\t\t\t\t\t<reference key=\"source\" ref=\"497305065\"/>\n\t\t\t\t\t\t<reference key=\"destination\" ref=\"372490531\"/>\n\t\t\t\t\t\t<int key=\"IBEventType\">7</int>\n\t\t\t\t\t</object>\n\t\t\t\t\t<int key=\"connectionID\">32</int>\n\t\t\t\t</object>\n\t\t\t\t<object class=\"IBConnectionRecord\">\n\t\t\t\t\t<object class=\"IBCocoaTouchEventConnection\" key=\"connection\">\n\t\t\t\t\t\t<string 
key=\"label\">playButtonAction:</string>\n\t\t\t\t\t\t<reference key=\"source\" ref=\"1003644215\"/>\n\t\t\t\t\t\t<reference key=\"destination\" ref=\"372490531\"/>\n\t\t\t\t\t\t<int key=\"IBEventType\">7</int>\n\t\t\t\t\t</object>\n\t\t\t\t\t<int key=\"connectionID\">31</int>\n\t\t\t\t</object>\n\t\t\t</array>\n\t\t\t<object class=\"IBMutableOrderedSet\" key=\"objectRecords\">\n\t\t\t\t<array key=\"orderedObjects\">\n\t\t\t\t\t<object class=\"IBObjectRecord\">\n\t\t\t\t\t\t<int key=\"objectID\">0</int>\n\t\t\t\t\t\t<array key=\"object\" id=\"0\"/>\n\t\t\t\t\t\t<reference key=\"children\" ref=\"1000\"/>\n\t\t\t\t\t\t<nil key=\"parent\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBObjectRecord\">\n\t\t\t\t\t\t<int key=\"objectID\">-1</int>\n\t\t\t\t\t\t<reference key=\"object\" ref=\"372490531\"/>\n\t\t\t\t\t\t<reference key=\"parent\" ref=\"0\"/>\n\t\t\t\t\t\t<string key=\"objectName\">File's Owner</string>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBObjectRecord\">\n\t\t\t\t\t\t<int key=\"objectID\">-2</int>\n\t\t\t\t\t\t<reference key=\"object\" ref=\"843779117\"/>\n\t\t\t\t\t\t<reference key=\"parent\" ref=\"0\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBObjectRecord\">\n\t\t\t\t\t\t<int key=\"objectID\">6</int>\n\t\t\t\t\t\t<reference key=\"object\" ref=\"774585933\"/>\n\t\t\t\t\t\t<array class=\"NSMutableArray\" key=\"children\">\n\t\t\t\t\t\t\t<reference ref=\"1003644215\"/>\n\t\t\t\t\t\t\t<reference ref=\"497305065\"/>\n\t\t\t\t\t\t\t<reference ref=\"941375600\"/>\n\t\t\t\t\t\t\t<reference ref=\"686962460\"/>\n\t\t\t\t\t\t</array>\n\t\t\t\t\t\t<reference key=\"parent\" ref=\"0\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBObjectRecord\">\n\t\t\t\t\t\t<int key=\"objectID\">8</int>\n\t\t\t\t\t\t<reference key=\"object\" ref=\"941375600\"/>\n\t\t\t\t\t\t<array class=\"NSMutableArray\" key=\"children\"/>\n\t\t\t\t\t\t<reference key=\"parent\" ref=\"774585933\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object 
class=\"IBObjectRecord\">\n\t\t\t\t\t\t<int key=\"objectID\">9</int>\n\t\t\t\t\t\t<reference key=\"object\" ref=\"497305065\"/>\n\t\t\t\t\t\t<array class=\"NSMutableArray\" key=\"children\"/>\n\t\t\t\t\t\t<reference key=\"parent\" ref=\"774585933\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBObjectRecord\">\n\t\t\t\t\t\t<int key=\"objectID\">10</int>\n\t\t\t\t\t\t<reference key=\"object\" ref=\"1003644215\"/>\n\t\t\t\t\t\t<array class=\"NSMutableArray\" key=\"children\"/>\n\t\t\t\t\t\t<reference key=\"parent\" ref=\"774585933\"/>\n\t\t\t\t\t</object>\n\t\t\t\t\t<object class=\"IBObjectRecord\">\n\t\t\t\t\t\t<int key=\"objectID\">11</int>\n\t\t\t\t\t\t<reference key=\"object\" ref=\"686962460\"/>\n\t\t\t\t\t\t<array class=\"NSMutableArray\" key=\"children\"/>\n\t\t\t\t\t\t<reference key=\"parent\" ref=\"774585933\"/>\n\t\t\t\t\t</object>\n\t\t\t\t</array>\n\t\t\t</object>\n\t\t\t<dictionary class=\"NSMutableDictionary\" key=\"flattenedProperties\">\n\t\t\t\t<string key=\"-1.CustomClassName\">DFUViewController</string>\n\t\t\t\t<string key=\"-1.IBPluginDependency\">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t\t\t<string key=\"-2.CustomClassName\">UIResponder</string>\n\t\t\t\t<string key=\"-2.IBPluginDependency\">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t\t\t<string key=\"10.IBPluginDependency\">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t\t\t<string key=\"11.IBPluginDependency\">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t\t\t<string key=\"6.IBPluginDependency\">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t\t\t<string key=\"8.IBPluginDependency\">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t\t\t<string key=\"9.IBPluginDependency\">com.apple.InterfaceBuilder.IBCocoaTouchPlugin</string>\n\t\t\t</dictionary>\n\t\t\t<dictionary class=\"NSMutableDictionary\" key=\"unlocalizedProperties\"/>\n\t\t\t<nil key=\"activeLocalization\"/>\n\t\t\t<dictionary 
class=\"NSMutableDictionary\" key=\"localizations\"/>\n\t\t\t<nil key=\"sourceID\"/>\n\t\t\t<int key=\"maxID\">32</int>\n\t\t</object>\n\t\t<object class=\"IBClassDescriber\" key=\"IBDocument.Classes\">\n\t\t\t<array class=\"NSMutableArray\" key=\"referencedPartialClassDescriptions\">\n\t\t\t\t<object class=\"IBPartialClassDescription\">\n\t\t\t\t\t<string key=\"className\">DFUViewController</string>\n\t\t\t\t\t<string key=\"superclassName\">UIViewController</string>\n\t\t\t\t\t<dictionary class=\"NSMutableDictionary\" key=\"actions\">\n\t\t\t\t\t\t<string key=\"playButtonAction:\">id</string>\n\t\t\t\t\t\t<string key=\"showTime:\">id</string>\n\t\t\t\t\t</dictionary>\n\t\t\t\t\t<dictionary class=\"NSMutableDictionary\" key=\"actionInfosByName\">\n\t\t\t\t\t\t<object class=\"IBActionInfo\" key=\"playButtonAction:\">\n\t\t\t\t\t\t\t<string key=\"name\">playButtonAction:</string>\n\t\t\t\t\t\t\t<string key=\"candidateClassName\">id</string>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<object class=\"IBActionInfo\" key=\"showTime:\">\n\t\t\t\t\t\t\t<string key=\"name\">showTime:</string>\n\t\t\t\t\t\t\t<string key=\"candidateClassName\">id</string>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t</dictionary>\n\t\t\t\t\t<dictionary class=\"NSMutableDictionary\" key=\"outlets\">\n\t\t\t\t\t\t<string key=\"imageView\">UIImageView</string>\n\t\t\t\t\t\t<string key=\"label\">UILabel</string>\n\t\t\t\t\t\t<string key=\"playButton\">UIButton</string>\n\t\t\t\t\t</dictionary>\n\t\t\t\t\t<dictionary class=\"NSMutableDictionary\" key=\"toOneOutletInfosByName\">\n\t\t\t\t\t\t<object class=\"IBToOneOutletInfo\" key=\"imageView\">\n\t\t\t\t\t\t\t<string key=\"name\">imageView</string>\n\t\t\t\t\t\t\t<string key=\"candidateClassName\">UIImageView</string>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<object class=\"IBToOneOutletInfo\" key=\"label\">\n\t\t\t\t\t\t\t<string key=\"name\">label</string>\n\t\t\t\t\t\t\t<string key=\"candidateClassName\">UILabel</string>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t\t<object 
class=\"IBToOneOutletInfo\" key=\"playButton\">\n\t\t\t\t\t\t\t<string key=\"name\">playButton</string>\n\t\t\t\t\t\t\t<string key=\"candidateClassName\">UIButton</string>\n\t\t\t\t\t\t</object>\n\t\t\t\t\t</dictionary>\n\t\t\t\t\t<object class=\"IBClassDescriptionSource\" key=\"sourceIdentifier\">\n\t\t\t\t\t\t<string key=\"majorKey\">IBProjectSource</string>\n\t\t\t\t\t\t<string key=\"minorKey\">./Classes/DFUViewController.h</string>\n\t\t\t\t\t</object>\n\t\t\t\t</object>\n\t\t\t</array>\n\t\t</object>\n\t\t<int key=\"IBDocument.localizationMode\">0</int>\n\t\t<string key=\"IBDocument.TargetRuntimeIdentifier\">IBCocoaTouchFramework</string>\n\t\t<bool key=\"IBDocument.PluginDeclaredDependenciesTrackSystemTargetVersion\">YES</bool>\n\t\t<int key=\"IBDocument.defaultPropertyAccessControl\">3</int>\n\t\t<string key=\"IBCocoaTouchPluginVersion\">2083</string>\n\t</data>\n</archive>\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/en.lproj/InfoPlist.strings",
    "content": "/* Localized versions of Info.plist keys */\n\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer/main.m",
    "content": "//\n//  main.m\n//  DFURTSPPlayer\n//\n//  Created by Bogdan Furdui on 3/7/13.\n//  Copyright (c) 2013 Bogdan Furdui. All rights reserved.\n//\n\n#import <UIKit/UIKit.h>\n\n#import \"DFUAppDelegate.h\"\n\nint main(int argc, char *argv[])\n{\n    @autoreleasepool {\n        return UIApplicationMain(argc, argv, nil, NSStringFromClass([DFUAppDelegate class]));\n    }\n}\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayer.xcodeproj/project.pbxproj",
    "content": "// !$*UTF8*$!\n{\n\tarchiveVersion = 1;\n\tclasses = {\n\t};\n\tobjectVersion = 46;\n\tobjects = {\n\n/* Begin PBXBuildFile section */\n\t\t0649AB1416E9200D001323D2 /* UIKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649AB1316E9200D001323D2 /* UIKit.framework */; };\n\t\t0649AB1616E9200D001323D2 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649AB1516E9200D001323D2 /* Foundation.framework */; };\n\t\t0649AB1816E9200D001323D2 /* CoreGraphics.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649AB1716E9200D001323D2 /* CoreGraphics.framework */; };\n\t\t0649AB1E16E9200D001323D2 /* InfoPlist.strings in Resources */ = {isa = PBXBuildFile; fileRef = 0649AB1C16E9200D001323D2 /* InfoPlist.strings */; };\n\t\t0649AB2016E9200D001323D2 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 0649AB1F16E9200D001323D2 /* main.m */; };\n\t\t0649AB2416E9200D001323D2 /* DFUAppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = 0649AB2316E9200D001323D2 /* DFUAppDelegate.m */; };\n\t\t0649AB2D16E9200D001323D2 /* DFUViewController.m in Sources */ = {isa = PBXBuildFile; fileRef = 0649AB2C16E9200D001323D2 /* DFUViewController.m */; };\n\t\t0649AB3016E9200D001323D2 /* DFUViewController_iPhone.xib in Resources */ = {isa = PBXBuildFile; fileRef = 0649AB2E16E9200D001323D2 /* DFUViewController_iPhone.xib */; };\n\t\t0649AB3316E9200D001323D2 /* DFUViewController_iPad.xib in Resources */ = {isa = PBXBuildFile; fileRef = 0649AB3116E9200D001323D2 /* DFUViewController_iPad.xib */; };\n\t\t0649AB3B16E9200D001323D2 /* SenTestingKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649AB3A16E9200D001323D2 /* SenTestingKit.framework */; };\n\t\t0649AB3C16E9200D001323D2 /* UIKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649AB1316E9200D001323D2 /* UIKit.framework */; };\n\t\t0649AB3D16E9200D001323D2 /* Foundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 
0649AB1516E9200D001323D2 /* Foundation.framework */; };\n\t\t0649AB4516E9200D001323D2 /* InfoPlist.strings in Resources */ = {isa = PBXBuildFile; fileRef = 0649AB4316E9200D001323D2 /* InfoPlist.strings */; };\n\t\t0649AB4816E9200D001323D2 /* DFURTSPPlayerTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 0649AB4716E9200D001323D2 /* DFURTSPPlayerTests.m */; };\n\t\t0649AB5516E92055001323D2 /* Default-568h@2x.png in Resources */ = {isa = PBXBuildFile; fileRef = 0649AB5216E92055001323D2 /* Default-568h@2x.png */; };\n\t\t0649AB5616E92055001323D2 /* Default.png in Resources */ = {isa = PBXBuildFile; fileRef = 0649AB5316E92055001323D2 /* Default.png */; };\n\t\t0649AB5716E92055001323D2 /* Default@2x.png in Resources */ = {isa = PBXBuildFile; fileRef = 0649AB5416E92055001323D2 /* Default@2x.png */; };\n\t\t0649ABC016E928DF001323D2 /* libbz2.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649ABBF16E928DF001323D2 /* libbz2.dylib */; };\n\t\t0649ABC216E928E6001323D2 /* libz.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649ABC116E928E6001323D2 /* libz.dylib */; };\n\t\t0649ABC816E92C9F001323D2 /* AVFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649ABC716E92C9F001323D2 /* AVFoundation.framework */; };\n\t\t0649ABCA16E92CA7001323D2 /* QuartzCore.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649ABC916E92CA7001323D2 /* QuartzCore.framework */; };\n\t\t0649ABCC16E92CB1001323D2 /* CoreMedia.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649ABCB16E92CB1001323D2 /* CoreMedia.framework */; };\n\t\t0649ABCE16E92CB8001323D2 /* CoreImage.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649ABCD16E92CB8001323D2 /* CoreImage.framework */; };\n\t\t0649ABD016E92CBE001323D2 /* CoreVideo.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649ABCF16E92CBE001323D2 /* CoreVideo.framework */; };\n\t\t0649ABDB16E93103001323D2 /* AudioStreamer.m in Sources */ = {isa = PBXBuildFile; fileRef = 
0649ABD616E93103001323D2 /* AudioStreamer.m */; };\n\t\t0649ABDC16E93103001323D2 /* Utilities.m in Sources */ = {isa = PBXBuildFile; fileRef = 0649ABD816E93103001323D2 /* Utilities.m */; };\n\t\t0649ABDD16E93103001323D2 /* RTSPPlayer.m in Sources */ = {isa = PBXBuildFile; fileRef = 0649ABDA16E93103001323D2 /* RTSPPlayer.m */; };\n\t\t0649ABDF16E93112001323D2 /* AudioToolbox.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649ABDE16E93112001323D2 /* AudioToolbox.framework */; };\n\t\t0649ABE116E93227001323D2 /* sophie.mov in Resources */ = {isa = PBXBuildFile; fileRef = 0649ABE016E93227001323D2 /* sophie.mov */; };\n\t\t75601E3E1AB9FC3400C57B81 /* libavcodec.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649ABAF16E928BA001323D2 /* libavcodec.a */; };\n\t\t75601E3F1AB9FC3400C57B81 /* libavdevice.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649ABB016E928BA001323D2 /* libavdevice.a */; };\n\t\t75601E401AB9FC3400C57B81 /* libavfilter.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649ABB116E928BA001323D2 /* libavfilter.a */; };\n\t\t75601E411AB9FC3400C57B81 /* libavformat.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649ABB216E928BA001323D2 /* libavformat.a */; };\n\t\t75601E421AB9FC3400C57B81 /* libavutil.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649ABB416E928BA001323D2 /* libavutil.a */; };\n\t\t75601E431AB9FC3400C57B81 /* libswresample.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649ABB516E928BA001323D2 /* libswresample.a */; };\n\t\t75601E441AB9FC3400C57B81 /* libswscale.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 0649ABB616E928BA001323D2 /* libswscale.a */; };\n\t\t75601E461ABA00DF00C57B81 /* libiconv.dylib in Frameworks */ = {isa = PBXBuildFile; fileRef = 75601E451ABA00DF00C57B81 /* libiconv.dylib */; };\n/* End PBXBuildFile section */\n\n/* Begin PBXContainerItemProxy section */\n\t\t0649AB3E16E9200D001323D2 /* PBXContainerItemProxy */ = {\n\t\t\tisa = PBXContainerItemProxy;\n\t\t\tcontainerPortal = 
0649AB0816E9200D001323D2 /* Project object */;\n\t\t\tproxyType = 1;\n\t\t\tremoteGlobalIDString = 0649AB0F16E9200D001323D2;\n\t\t\tremoteInfo = DFURTSPPlayer;\n\t\t};\n/* End PBXContainerItemProxy section */\n\n/* Begin PBXFileReference section */\n\t\t0649AB1016E9200D001323D2 /* DFURTSPPlayer.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = DFURTSPPlayer.app; sourceTree = BUILT_PRODUCTS_DIR; };\n\t\t0649AB1316E9200D001323D2 /* UIKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = UIKit.framework; path = System/Library/Frameworks/UIKit.framework; sourceTree = SDKROOT; };\n\t\t0649AB1516E9200D001323D2 /* Foundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Foundation.framework; path = System/Library/Frameworks/Foundation.framework; sourceTree = SDKROOT; };\n\t\t0649AB1716E9200D001323D2 /* CoreGraphics.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreGraphics.framework; path = System/Library/Frameworks/CoreGraphics.framework; sourceTree = SDKROOT; };\n\t\t0649AB1B16E9200D001323D2 /* DFURTSPPlayer-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = \"DFURTSPPlayer-Info.plist\"; sourceTree = \"<group>\"; };\n\t\t0649AB1D16E9200D001323D2 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = \"<group>\"; };\n\t\t0649AB1F16E9200D001323D2 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = \"<group>\"; };\n\t\t0649AB2116E9200D001323D2 /* DFURTSPPlayer-Prefix.pch */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = \"DFURTSPPlayer-Prefix.pch\"; sourceTree = \"<group>\"; };\n\t\t0649AB2216E9200D001323D2 /* DFUAppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = DFUAppDelegate.h; 
sourceTree = \"<group>\"; };\n\t\t0649AB2316E9200D001323D2 /* DFUAppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = DFUAppDelegate.m; sourceTree = \"<group>\"; };\n\t\t0649AB2B16E9200D001323D2 /* DFUViewController.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = DFUViewController.h; sourceTree = \"<group>\"; };\n\t\t0649AB2C16E9200D001323D2 /* DFUViewController.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = DFUViewController.m; sourceTree = \"<group>\"; };\n\t\t0649AB2F16E9200D001323D2 /* en */ = {isa = PBXFileReference; lastKnownFileType = file.xib; name = en; path = en.lproj/DFUViewController_iPhone.xib; sourceTree = \"<group>\"; };\n\t\t0649AB3216E9200D001323D2 /* en */ = {isa = PBXFileReference; lastKnownFileType = file.xib; name = en; path = en.lproj/DFUViewController_iPad.xib; sourceTree = \"<group>\"; };\n\t\t0649AB3916E9200D001323D2 /* DFURTSPPlayerTests.octest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = DFURTSPPlayerTests.octest; sourceTree = BUILT_PRODUCTS_DIR; };\n\t\t0649AB3A16E9200D001323D2 /* SenTestingKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = SenTestingKit.framework; path = Library/Frameworks/SenTestingKit.framework; sourceTree = DEVELOPER_DIR; };\n\t\t0649AB4216E9200D001323D2 /* DFURTSPPlayerTests-Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = \"DFURTSPPlayerTests-Info.plist\"; sourceTree = \"<group>\"; };\n\t\t0649AB4416E9200D001323D2 /* en */ = {isa = PBXFileReference; lastKnownFileType = text.plist.strings; name = en; path = en.lproj/InfoPlist.strings; sourceTree = \"<group>\"; };\n\t\t0649AB4616E9200D001323D2 /* DFURTSPPlayerTests.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = DFURTSPPlayerTests.h; sourceTree = \"<group>\"; };\n\t\t0649AB4716E9200D001323D2 /* DFURTSPPlayerTests.m */ = {isa = 
PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = DFURTSPPlayerTests.m; sourceTree = \"<group>\"; };\n\t\t0649AB5216E92055001323D2 /* Default-568h@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = \"Default-568h@2x.png\"; sourceTree = \"<group>\"; };\n\t\t0649AB5316E92055001323D2 /* Default.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = Default.png; sourceTree = \"<group>\"; };\n\t\t0649AB5416E92055001323D2 /* Default@2x.png */ = {isa = PBXFileReference; lastKnownFileType = image.png; path = \"Default@2x.png\"; sourceTree = \"<group>\"; };\n\t\t0649AB5D16E928BA001323D2 /* avcodec.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = \"<group>\"; };\n\t\t0649AB5E16E928BA001323D2 /* avfft.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avfft.h; sourceTree = \"<group>\"; };\n\t\t0649AB5F16E928BA001323D2 /* dxva2.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dxva2.h; sourceTree = \"<group>\"; };\n\t\t0649AB6016E928BA001323D2 /* old_codec_ids.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = old_codec_ids.h; sourceTree = \"<group>\"; };\n\t\t0649AB6116E928BA001323D2 /* vaapi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vaapi.h; sourceTree = \"<group>\"; };\n\t\t0649AB6216E928BA001323D2 /* vda.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vda.h; sourceTree = \"<group>\"; };\n\t\t0649AB6316E928BA001323D2 /* vdpau.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vdpau.h; sourceTree = \"<group>\"; };\n\t\t0649AB6416E928BA001323D2 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = \"<group>\"; 
};\n\t\t0649AB6516E928BA001323D2 /* xvmc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = xvmc.h; sourceTree = \"<group>\"; };\n\t\t0649AB6716E928BA001323D2 /* avdevice.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avdevice.h; sourceTree = \"<group>\"; };\n\t\t0649AB6816E928BA001323D2 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = \"<group>\"; };\n\t\t0649AB6A16E928BA001323D2 /* asrc_abuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = asrc_abuffer.h; sourceTree = \"<group>\"; };\n\t\t0649AB6B16E928BA001323D2 /* avcodec.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avcodec.h; sourceTree = \"<group>\"; };\n\t\t0649AB6C16E928BA001323D2 /* avfilter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avfilter.h; sourceTree = \"<group>\"; };\n\t\t0649AB6D16E928BA001323D2 /* avfiltergraph.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avfiltergraph.h; sourceTree = \"<group>\"; };\n\t\t0649AB6E16E928BA001323D2 /* buffersink.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = buffersink.h; sourceTree = \"<group>\"; };\n\t\t0649AB6F16E928BA001323D2 /* buffersrc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = buffersrc.h; sourceTree = \"<group>\"; };\n\t\t0649AB7016E928BA001323D2 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = \"<group>\"; };\n\t\t0649AB7216E928BA001323D2 /* avformat.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avformat.h; sourceTree = \"<group>\"; };\n\t\t0649AB7316E928BA001323D2 /* avio.h */ = {isa 
= PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avio.h; sourceTree = \"<group>\"; };\n\t\t0649AB7416E928BA001323D2 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = \"<group>\"; };\n\t\t0649AB7616E928BA001323D2 /* avresample.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avresample.h; sourceTree = \"<group>\"; };\n\t\t0649AB7716E928BA001323D2 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = \"<group>\"; };\n\t\t0649AB7916E928BA001323D2 /* adler32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = adler32.h; sourceTree = \"<group>\"; };\n\t\t0649AB7A16E928BA001323D2 /* aes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aes.h; sourceTree = \"<group>\"; };\n\t\t0649AB7B16E928BA001323D2 /* attributes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = attributes.h; sourceTree = \"<group>\"; };\n\t\t0649AB7C16E928BA001323D2 /* audio_fifo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = audio_fifo.h; sourceTree = \"<group>\"; };\n\t\t0649AB7D16E928BA001323D2 /* audioconvert.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = audioconvert.h; sourceTree = \"<group>\"; };\n\t\t0649AB7E16E928BA001323D2 /* avassert.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avassert.h; sourceTree = \"<group>\"; };\n\t\t0649AB7F16E928BA001323D2 /* avconfig.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avconfig.h; sourceTree = \"<group>\"; };\n\t\t0649AB8016E928BA001323D2 /* avstring.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = avstring.h; sourceTree = \"<group>\"; };\n\t\t0649AB8116E928BA001323D2 /* avutil.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = avutil.h; sourceTree = \"<group>\"; };\n\t\t0649AB8216E928BA001323D2 /* base64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = base64.h; sourceTree = \"<group>\"; };\n\t\t0649AB8316E928BA001323D2 /* blowfish.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = blowfish.h; sourceTree = \"<group>\"; };\n\t\t0649AB8416E928BA001323D2 /* bprint.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bprint.h; sourceTree = \"<group>\"; };\n\t\t0649AB8516E928BA001323D2 /* bswap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bswap.h; sourceTree = \"<group>\"; };\n\t\t0649AB8616E928BA001323D2 /* channel_layout.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = channel_layout.h; sourceTree = \"<group>\"; };\n\t\t0649AB8716E928BA001323D2 /* common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = \"<group>\"; };\n\t\t0649AB8816E928BA001323D2 /* cpu.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cpu.h; sourceTree = \"<group>\"; };\n\t\t0649AB8916E928BA001323D2 /* crc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = crc.h; sourceTree = \"<group>\"; };\n\t\t0649AB8A16E928BA001323D2 /* dict.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dict.h; sourceTree = \"<group>\"; };\n\t\t0649AB8B16E928BA001323D2 /* error.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = error.h; sourceTree = \"<group>\"; };\n\t\t0649AB8C16E928BA001323D2 /* eval.h */ 
= {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eval.h; sourceTree = \"<group>\"; };\n\t\t0649AB8D16E928BA001323D2 /* fifo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fifo.h; sourceTree = \"<group>\"; };\n\t\t0649AB8E16E928BA001323D2 /* file.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = file.h; sourceTree = \"<group>\"; };\n\t\t0649AB8F16E928BA001323D2 /* hmac.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hmac.h; sourceTree = \"<group>\"; };\n\t\t0649AB9016E928BA001323D2 /* imgutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = imgutils.h; sourceTree = \"<group>\"; };\n\t\t0649AB9116E928BA001323D2 /* intfloat.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intfloat.h; sourceTree = \"<group>\"; };\n\t\t0649AB9216E928BA001323D2 /* intfloat_readwrite.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intfloat_readwrite.h; sourceTree = \"<group>\"; };\n\t\t0649AB9316E928BA001323D2 /* intreadwrite.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intreadwrite.h; sourceTree = \"<group>\"; };\n\t\t0649AB9416E928BA001323D2 /* lfg.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lfg.h; sourceTree = \"<group>\"; };\n\t\t0649AB9516E928BA001323D2 /* log.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = log.h; sourceTree = \"<group>\"; };\n\t\t0649AB9616E928BA001323D2 /* lzo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lzo.h; sourceTree = \"<group>\"; };\n\t\t0649AB9716E928BA001323D2 /* mathematics.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
mathematics.h; sourceTree = \"<group>\"; };\n\t\t0649AB9816E928BA001323D2 /* md5.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = md5.h; sourceTree = \"<group>\"; };\n\t\t0649AB9916E928BA001323D2 /* mem.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mem.h; sourceTree = \"<group>\"; };\n\t\t0649AB9A16E928BA001323D2 /* old_pix_fmts.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = old_pix_fmts.h; sourceTree = \"<group>\"; };\n\t\t0649AB9B16E928BA001323D2 /* opt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = opt.h; sourceTree = \"<group>\"; };\n\t\t0649AB9C16E928BA001323D2 /* parseutils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = parseutils.h; sourceTree = \"<group>\"; };\n\t\t0649AB9D16E928BA001323D2 /* pixdesc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pixdesc.h; sourceTree = \"<group>\"; };\n\t\t0649AB9E16E928BA001323D2 /* pixfmt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pixfmt.h; sourceTree = \"<group>\"; };\n\t\t0649AB9F16E928BA001323D2 /* random_seed.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = random_seed.h; sourceTree = \"<group>\"; };\n\t\t0649ABA016E928BA001323D2 /* rational.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rational.h; sourceTree = \"<group>\"; };\n\t\t0649ABA116E928BA001323D2 /* samplefmt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = samplefmt.h; sourceTree = \"<group>\"; };\n\t\t0649ABA216E928BA001323D2 /* sha.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sha.h; sourceTree = \"<group>\"; };\n\t\t0649ABA316E928BA001323D2 /* time.h */ = 
{isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = time.h; sourceTree = \"<group>\"; };\n\t\t0649ABA416E928BA001323D2 /* timecode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = timecode.h; sourceTree = \"<group>\"; };\n\t\t0649ABA516E928BA001323D2 /* timestamp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = timestamp.h; sourceTree = \"<group>\"; };\n\t\t0649ABA616E928BA001323D2 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = \"<group>\"; };\n\t\t0649ABA716E928BA001323D2 /* xtea.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = xtea.h; sourceTree = \"<group>\"; };\n\t\t0649ABA916E928BA001323D2 /* swresample.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = swresample.h; sourceTree = \"<group>\"; };\n\t\t0649ABAA16E928BA001323D2 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = \"<group>\"; };\n\t\t0649ABAC16E928BA001323D2 /* swscale.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = swscale.h; sourceTree = \"<group>\"; };\n\t\t0649ABAD16E928BA001323D2 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = \"<group>\"; };\n\t\t0649ABAF16E928BA001323D2 /* libavcodec.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavcodec.a; sourceTree = \"<group>\"; };\n\t\t0649ABB016E928BA001323D2 /* libavdevice.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavdevice.a; sourceTree = \"<group>\"; };\n\t\t0649ABB116E928BA001323D2 /* libavfilter.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavfilter.a; sourceTree = \"<group>\"; 
};\n\t\t0649ABB216E928BA001323D2 /* libavformat.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavformat.a; sourceTree = \"<group>\"; };\n\t\t0649ABB316E928BA001323D2 /* libavresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavresample.a; sourceTree = \"<group>\"; };\n\t\t0649ABB416E928BA001323D2 /* libavutil.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libavutil.a; sourceTree = \"<group>\"; };\n\t\t0649ABB516E928BA001323D2 /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswresample.a; sourceTree = \"<group>\"; };\n\t\t0649ABB616E928BA001323D2 /* libswscale.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libswscale.a; sourceTree = \"<group>\"; };\n\t\t0649ABBF16E928DF001323D2 /* libbz2.dylib */ = {isa = PBXFileReference; lastKnownFileType = \"compiled.mach-o.dylib\"; name = libbz2.dylib; path = usr/lib/libbz2.dylib; sourceTree = SDKROOT; };\n\t\t0649ABC116E928E6001323D2 /* libz.dylib */ = {isa = PBXFileReference; lastKnownFileType = \"compiled.mach-o.dylib\"; name = libz.dylib; path = usr/lib/libz.dylib; sourceTree = SDKROOT; };\n\t\t0649ABC716E92C9F001323D2 /* AVFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AVFoundation.framework; path = System/Library/Frameworks/AVFoundation.framework; sourceTree = SDKROOT; };\n\t\t0649ABC916E92CA7001323D2 /* QuartzCore.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = QuartzCore.framework; path = System/Library/Frameworks/QuartzCore.framework; sourceTree = SDKROOT; };\n\t\t0649ABCB16E92CB1001323D2 /* CoreMedia.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreMedia.framework; path = System/Library/Frameworks/CoreMedia.framework; sourceTree = SDKROOT; };\n\t\t0649ABCD16E92CB8001323D2 /* CoreImage.framework */ = {isa = PBXFileReference; lastKnownFileType = 
wrapper.framework; name = CoreImage.framework; path = System/Library/Frameworks/CoreImage.framework; sourceTree = SDKROOT; };\n\t\t0649ABCF16E92CBE001323D2 /* CoreVideo.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreVideo.framework; path = System/Library/Frameworks/CoreVideo.framework; sourceTree = SDKROOT; };\n\t\t0649ABD516E93103001323D2 /* AudioStreamer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AudioStreamer.h; sourceTree = \"<group>\"; };\n\t\t0649ABD616E93103001323D2 /* AudioStreamer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = AudioStreamer.m; sourceTree = \"<group>\"; };\n\t\t0649ABD716E93103001323D2 /* Utilities.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Utilities.h; sourceTree = \"<group>\"; };\n\t\t0649ABD816E93103001323D2 /* Utilities.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = Utilities.m; sourceTree = \"<group>\"; };\n\t\t0649ABD916E93103001323D2 /* RTSPPlayer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RTSPPlayer.h; sourceTree = \"<group>\"; };\n\t\t0649ABDA16E93103001323D2 /* RTSPPlayer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = RTSPPlayer.m; sourceTree = \"<group>\"; };\n\t\t0649ABDE16E93112001323D2 /* AudioToolbox.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AudioToolbox.framework; path = System/Library/Frameworks/AudioToolbox.framework; sourceTree = SDKROOT; };\n\t\t0649ABE016E93227001323D2 /* sophie.mov */ = {isa = PBXFileReference; lastKnownFileType = video.quicktime; path = sophie.mov; sourceTree = \"<group>\"; };\n\t\t75601E451ABA00DF00C57B81 /* libiconv.dylib */ = {isa = PBXFileReference; lastKnownFileType = \"compiled.mach-o.dylib\"; name = libiconv.dylib; 
path = usr/lib/libiconv.dylib; sourceTree = SDKROOT; };\n/* End PBXFileReference section */\n\n/* Begin PBXFrameworksBuildPhase section */\n\t\t0649AB0D16E9200D001323D2 /* Frameworks */ = {\n\t\t\tisa = PBXFrameworksBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t75601E461ABA00DF00C57B81 /* libiconv.dylib in Frameworks */,\n\t\t\t\t75601E3E1AB9FC3400C57B81 /* libavcodec.a in Frameworks */,\n\t\t\t\t75601E3F1AB9FC3400C57B81 /* libavdevice.a in Frameworks */,\n\t\t\t\t75601E401AB9FC3400C57B81 /* libavfilter.a in Frameworks */,\n\t\t\t\t75601E411AB9FC3400C57B81 /* libavformat.a in Frameworks */,\n\t\t\t\t75601E421AB9FC3400C57B81 /* libavutil.a in Frameworks */,\n\t\t\t\t75601E431AB9FC3400C57B81 /* libswresample.a in Frameworks */,\n\t\t\t\t75601E441AB9FC3400C57B81 /* libswscale.a in Frameworks */,\n\t\t\t\t0649ABDF16E93112001323D2 /* AudioToolbox.framework in Frameworks */,\n\t\t\t\t0649ABD016E92CBE001323D2 /* CoreVideo.framework in Frameworks */,\n\t\t\t\t0649ABCE16E92CB8001323D2 /* CoreImage.framework in Frameworks */,\n\t\t\t\t0649ABCC16E92CB1001323D2 /* CoreMedia.framework in Frameworks */,\n\t\t\t\t0649ABCA16E92CA7001323D2 /* QuartzCore.framework in Frameworks */,\n\t\t\t\t0649ABC816E92C9F001323D2 /* AVFoundation.framework in Frameworks */,\n\t\t\t\t0649ABC216E928E6001323D2 /* libz.dylib in Frameworks */,\n\t\t\t\t0649ABC016E928DF001323D2 /* libbz2.dylib in Frameworks */,\n\t\t\t\t0649AB1416E9200D001323D2 /* UIKit.framework in Frameworks */,\n\t\t\t\t0649AB1616E9200D001323D2 /* Foundation.framework in Frameworks */,\n\t\t\t\t0649AB1816E9200D001323D2 /* CoreGraphics.framework in Frameworks */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t0649AB3516E9200D001323D2 /* Frameworks */ = {\n\t\t\tisa = PBXFrameworksBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t0649AB3B16E9200D001323D2 /* SenTestingKit.framework in Frameworks */,\n\t\t\t\t0649AB3C16E9200D001323D2 /* UIKit.framework in 
Frameworks */,\n\t\t\t\t0649AB3D16E9200D001323D2 /* Foundation.framework in Frameworks */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXFrameworksBuildPhase section */\n\n/* Begin PBXGroup section */\n\t\t0649AB0716E9200D001323D2 = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB1916E9200D001323D2 /* DFURTSPPlayer */,\n\t\t\t\t0649AB4016E9200D001323D2 /* DFURTSPPlayerTests */,\n\t\t\t\t0649AB1216E9200D001323D2 /* Frameworks */,\n\t\t\t\t0649AB1116E9200D001323D2 /* Products */,\n\t\t\t);\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB1116E9200D001323D2 /* Products */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB1016E9200D001323D2 /* DFURTSPPlayer.app */,\n\t\t\t\t0649AB3916E9200D001323D2 /* DFURTSPPlayerTests.octest */,\n\t\t\t);\n\t\t\tname = Products;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB1216E9200D001323D2 /* Frameworks */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t75601E451ABA00DF00C57B81 /* libiconv.dylib */,\n\t\t\t\t0649ABC116E928E6001323D2 /* libz.dylib */,\n\t\t\t\t0649ABBF16E928DF001323D2 /* libbz2.dylib */,\n\t\t\t\t0649ABDE16E93112001323D2 /* AudioToolbox.framework */,\n\t\t\t\t0649ABCF16E92CBE001323D2 /* CoreVideo.framework */,\n\t\t\t\t0649ABCD16E92CB8001323D2 /* CoreImage.framework */,\n\t\t\t\t0649ABCB16E92CB1001323D2 /* CoreMedia.framework */,\n\t\t\t\t0649ABC916E92CA7001323D2 /* QuartzCore.framework */,\n\t\t\t\t0649ABC716E92C9F001323D2 /* AVFoundation.framework */,\n\t\t\t\t0649AB1316E9200D001323D2 /* UIKit.framework */,\n\t\t\t\t0649AB1516E9200D001323D2 /* Foundation.framework */,\n\t\t\t\t0649AB1716E9200D001323D2 /* CoreGraphics.framework */,\n\t\t\t\t0649AB3A16E9200D001323D2 /* SenTestingKit.framework */,\n\t\t\t);\n\t\t\tname = Frameworks;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB1916E9200D001323D2 /* DFURTSPPlayer */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB2216E9200D001323D2 /* DFUAppDelegate.h 
*/,\n\t\t\t\t0649AB2316E9200D001323D2 /* DFUAppDelegate.m */,\n\t\t\t\t0649AB2B16E9200D001323D2 /* DFUViewController.h */,\n\t\t\t\t0649AB2C16E9200D001323D2 /* DFUViewController.m */,\n\t\t\t\t0649AB5A16E928BA001323D2 /* FFMpegiOS */,\n\t\t\t\t0649ABD116E92D17001323D2 /* FFMpegDecoder */,\n\t\t\t\t0649AB5116E92055001323D2 /* Resources */,\n\t\t\t\t0649AB1A16E9200D001323D2 /* Supporting Files */,\n\t\t\t);\n\t\t\tpath = DFURTSPPlayer;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB1A16E9200D001323D2 /* Supporting Files */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB1B16E9200D001323D2 /* DFURTSPPlayer-Info.plist */,\n\t\t\t\t0649AB1C16E9200D001323D2 /* InfoPlist.strings */,\n\t\t\t\t0649AB1F16E9200D001323D2 /* main.m */,\n\t\t\t\t0649AB2116E9200D001323D2 /* DFURTSPPlayer-Prefix.pch */,\n\t\t\t);\n\t\t\tname = \"Supporting Files\";\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB4016E9200D001323D2 /* DFURTSPPlayerTests */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB4616E9200D001323D2 /* DFURTSPPlayerTests.h */,\n\t\t\t\t0649AB4716E9200D001323D2 /* DFURTSPPlayerTests.m */,\n\t\t\t\t0649AB4116E9200D001323D2 /* Supporting Files */,\n\t\t\t);\n\t\t\tpath = DFURTSPPlayerTests;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB4116E9200D001323D2 /* Supporting Files */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB4216E9200D001323D2 /* DFURTSPPlayerTests-Info.plist */,\n\t\t\t\t0649AB4316E9200D001323D2 /* InfoPlist.strings */,\n\t\t\t);\n\t\t\tname = \"Supporting Files\";\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB5116E92055001323D2 /* Resources */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB5916E92086001323D2 /* XIB */,\n\t\t\t\t0649AB5216E92055001323D2 /* Default-568h@2x.png */,\n\t\t\t\t0649AB5316E92055001323D2 /* Default.png */,\n\t\t\t\t0649ABE016E93227001323D2 /* sophie.mov */,\n\t\t\t\t0649AB5416E92055001323D2 /* Default@2x.png */,\n\t\t\t);\n\t\t\tpath = 
Resources;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB5916E92086001323D2 /* XIB */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB2E16E9200D001323D2 /* DFUViewController_iPhone.xib */,\n\t\t\t\t0649AB3116E9200D001323D2 /* DFUViewController_iPad.xib */,\n\t\t\t);\n\t\t\tname = XIB;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB5A16E928BA001323D2 /* FFMpegiOS */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB5B16E928BA001323D2 /* include */,\n\t\t\t\t0649ABAE16E928BA001323D2 /* lib */,\n\t\t\t);\n\t\t\tpath = FFMpegiOS;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB5B16E928BA001323D2 /* include */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB5C16E928BA001323D2 /* libavcodec */,\n\t\t\t\t0649AB6616E928BA001323D2 /* libavdevice */,\n\t\t\t\t0649AB6916E928BA001323D2 /* libavfilter */,\n\t\t\t\t0649AB7116E928BA001323D2 /* libavformat */,\n\t\t\t\t0649AB7516E928BA001323D2 /* libavresample */,\n\t\t\t\t0649AB7816E928BA001323D2 /* libavutil */,\n\t\t\t\t0649ABA816E928BA001323D2 /* libswresample */,\n\t\t\t\t0649ABAB16E928BA001323D2 /* libswscale */,\n\t\t\t);\n\t\t\tpath = include;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB5C16E928BA001323D2 /* libavcodec */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB5D16E928BA001323D2 /* avcodec.h */,\n\t\t\t\t0649AB5E16E928BA001323D2 /* avfft.h */,\n\t\t\t\t0649AB5F16E928BA001323D2 /* dxva2.h */,\n\t\t\t\t0649AB6016E928BA001323D2 /* old_codec_ids.h */,\n\t\t\t\t0649AB6116E928BA001323D2 /* vaapi.h */,\n\t\t\t\t0649AB6216E928BA001323D2 /* vda.h */,\n\t\t\t\t0649AB6316E928BA001323D2 /* vdpau.h */,\n\t\t\t\t0649AB6416E928BA001323D2 /* version.h */,\n\t\t\t\t0649AB6516E928BA001323D2 /* xvmc.h */,\n\t\t\t);\n\t\t\tpath = libavcodec;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB6616E928BA001323D2 /* libavdevice */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB6716E928BA001323D2 /* avdevice.h 
*/,\n\t\t\t\t0649AB6816E928BA001323D2 /* version.h */,\n\t\t\t);\n\t\t\tpath = libavdevice;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB6916E928BA001323D2 /* libavfilter */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB6A16E928BA001323D2 /* asrc_abuffer.h */,\n\t\t\t\t0649AB6B16E928BA001323D2 /* avcodec.h */,\n\t\t\t\t0649AB6C16E928BA001323D2 /* avfilter.h */,\n\t\t\t\t0649AB6D16E928BA001323D2 /* avfiltergraph.h */,\n\t\t\t\t0649AB6E16E928BA001323D2 /* buffersink.h */,\n\t\t\t\t0649AB6F16E928BA001323D2 /* buffersrc.h */,\n\t\t\t\t0649AB7016E928BA001323D2 /* version.h */,\n\t\t\t);\n\t\t\tpath = libavfilter;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB7116E928BA001323D2 /* libavformat */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB7216E928BA001323D2 /* avformat.h */,\n\t\t\t\t0649AB7316E928BA001323D2 /* avio.h */,\n\t\t\t\t0649AB7416E928BA001323D2 /* version.h */,\n\t\t\t);\n\t\t\tpath = libavformat;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB7516E928BA001323D2 /* libavresample */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB7616E928BA001323D2 /* avresample.h */,\n\t\t\t\t0649AB7716E928BA001323D2 /* version.h */,\n\t\t\t);\n\t\t\tpath = libavresample;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB7816E928BA001323D2 /* libavutil */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB7916E928BA001323D2 /* adler32.h */,\n\t\t\t\t0649AB7A16E928BA001323D2 /* aes.h */,\n\t\t\t\t0649AB7B16E928BA001323D2 /* attributes.h */,\n\t\t\t\t0649AB7C16E928BA001323D2 /* audio_fifo.h */,\n\t\t\t\t0649AB7D16E928BA001323D2 /* audioconvert.h */,\n\t\t\t\t0649AB7E16E928BA001323D2 /* avassert.h */,\n\t\t\t\t0649AB7F16E928BA001323D2 /* avconfig.h */,\n\t\t\t\t0649AB8016E928BA001323D2 /* avstring.h */,\n\t\t\t\t0649AB8116E928BA001323D2 /* avutil.h */,\n\t\t\t\t0649AB8216E928BA001323D2 /* base64.h */,\n\t\t\t\t0649AB8316E928BA001323D2 /* blowfish.h */,\n\t\t\t\t0649AB8416E928BA001323D2 /* bprint.h 
*/,\n\t\t\t\t0649AB8516E928BA001323D2 /* bswap.h */,\n\t\t\t\t0649AB8616E928BA001323D2 /* channel_layout.h */,\n\t\t\t\t0649AB8716E928BA001323D2 /* common.h */,\n\t\t\t\t0649AB8816E928BA001323D2 /* cpu.h */,\n\t\t\t\t0649AB8916E928BA001323D2 /* crc.h */,\n\t\t\t\t0649AB8A16E928BA001323D2 /* dict.h */,\n\t\t\t\t0649AB8B16E928BA001323D2 /* error.h */,\n\t\t\t\t0649AB8C16E928BA001323D2 /* eval.h */,\n\t\t\t\t0649AB8D16E928BA001323D2 /* fifo.h */,\n\t\t\t\t0649AB8E16E928BA001323D2 /* file.h */,\n\t\t\t\t0649AB8F16E928BA001323D2 /* hmac.h */,\n\t\t\t\t0649AB9016E928BA001323D2 /* imgutils.h */,\n\t\t\t\t0649AB9116E928BA001323D2 /* intfloat.h */,\n\t\t\t\t0649AB9216E928BA001323D2 /* intfloat_readwrite.h */,\n\t\t\t\t0649AB9316E928BA001323D2 /* intreadwrite.h */,\n\t\t\t\t0649AB9416E928BA001323D2 /* lfg.h */,\n\t\t\t\t0649AB9516E928BA001323D2 /* log.h */,\n\t\t\t\t0649AB9616E928BA001323D2 /* lzo.h */,\n\t\t\t\t0649AB9716E928BA001323D2 /* mathematics.h */,\n\t\t\t\t0649AB9816E928BA001323D2 /* md5.h */,\n\t\t\t\t0649AB9916E928BA001323D2 /* mem.h */,\n\t\t\t\t0649AB9A16E928BA001323D2 /* old_pix_fmts.h */,\n\t\t\t\t0649AB9B16E928BA001323D2 /* opt.h */,\n\t\t\t\t0649AB9C16E928BA001323D2 /* parseutils.h */,\n\t\t\t\t0649AB9D16E928BA001323D2 /* pixdesc.h */,\n\t\t\t\t0649AB9E16E928BA001323D2 /* pixfmt.h */,\n\t\t\t\t0649AB9F16E928BA001323D2 /* random_seed.h */,\n\t\t\t\t0649ABA016E928BA001323D2 /* rational.h */,\n\t\t\t\t0649ABA116E928BA001323D2 /* samplefmt.h */,\n\t\t\t\t0649ABA216E928BA001323D2 /* sha.h */,\n\t\t\t\t0649ABA316E928BA001323D2 /* time.h */,\n\t\t\t\t0649ABA416E928BA001323D2 /* timecode.h */,\n\t\t\t\t0649ABA516E928BA001323D2 /* timestamp.h */,\n\t\t\t\t0649ABA616E928BA001323D2 /* version.h */,\n\t\t\t\t0649ABA716E928BA001323D2 /* xtea.h */,\n\t\t\t);\n\t\t\tpath = libavutil;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649ABA816E928BA001323D2 /* libswresample */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649ABA916E928BA001323D2 /* swresample.h 
*/,\n\t\t\t\t0649ABAA16E928BA001323D2 /* version.h */,\n\t\t\t);\n\t\t\tpath = libswresample;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649ABAB16E928BA001323D2 /* libswscale */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649ABAC16E928BA001323D2 /* swscale.h */,\n\t\t\t\t0649ABAD16E928BA001323D2 /* version.h */,\n\t\t\t);\n\t\t\tpath = libswscale;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649ABAE16E928BA001323D2 /* lib */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649ABAF16E928BA001323D2 /* libavcodec.a */,\n\t\t\t\t0649ABB016E928BA001323D2 /* libavdevice.a */,\n\t\t\t\t0649ABB116E928BA001323D2 /* libavfilter.a */,\n\t\t\t\t0649ABB216E928BA001323D2 /* libavformat.a */,\n\t\t\t\t0649ABB316E928BA001323D2 /* libavresample.a */,\n\t\t\t\t0649ABB416E928BA001323D2 /* libavutil.a */,\n\t\t\t\t0649ABB516E928BA001323D2 /* libswresample.a */,\n\t\t\t\t0649ABB616E928BA001323D2 /* libswscale.a */,\n\t\t\t);\n\t\t\tpath = lib;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649ABD116E92D17001323D2 /* FFMpegDecoder */ = {\n\t\t\tisa = PBXGroup;\n\t\t\tchildren = (\n\t\t\t\t0649ABD516E93103001323D2 /* AudioStreamer.h */,\n\t\t\t\t0649ABD616E93103001323D2 /* AudioStreamer.m */,\n\t\t\t\t0649ABD716E93103001323D2 /* Utilities.h */,\n\t\t\t\t0649ABD816E93103001323D2 /* Utilities.m */,\n\t\t\t\t0649ABD916E93103001323D2 /* RTSPPlayer.h */,\n\t\t\t\t0649ABDA16E93103001323D2 /* RTSPPlayer.m */,\n\t\t\t);\n\t\t\tpath = FFMpegDecoder;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n/* End PBXGroup section */\n\n/* Begin PBXNativeTarget section */\n\t\t0649AB0F16E9200D001323D2 /* DFURTSPPlayer */ = {\n\t\t\tisa = PBXNativeTarget;\n\t\t\tbuildConfigurationList = 0649AB4B16E9200D001323D2 /* Build configuration list for PBXNativeTarget \"DFURTSPPlayer\" */;\n\t\t\tbuildPhases = (\n\t\t\t\t0649AB0C16E9200D001323D2 /* Sources */,\n\t\t\t\t0649AB0D16E9200D001323D2 /* Frameworks */,\n\t\t\t\t0649AB0E16E9200D001323D2 /* Resources */,\n\t\t\t);\n\t\t\tbuildRules = 
(\n\t\t\t);\n\t\t\tdependencies = (\n\t\t\t);\n\t\t\tname = DFURTSPPlayer;\n\t\t\tproductName = DFURTSPPlayer;\n\t\t\tproductReference = 0649AB1016E9200D001323D2 /* DFURTSPPlayer.app */;\n\t\t\tproductType = \"com.apple.product-type.application\";\n\t\t};\n\t\t0649AB3816E9200D001323D2 /* DFURTSPPlayerTests */ = {\n\t\t\tisa = PBXNativeTarget;\n\t\t\tbuildConfigurationList = 0649AB4E16E9200D001323D2 /* Build configuration list for PBXNativeTarget \"DFURTSPPlayerTests\" */;\n\t\t\tbuildPhases = (\n\t\t\t\t0649AB3416E9200D001323D2 /* Sources */,\n\t\t\t\t0649AB3516E9200D001323D2 /* Frameworks */,\n\t\t\t\t0649AB3616E9200D001323D2 /* Resources */,\n\t\t\t\t0649AB3716E9200D001323D2 /* ShellScript */,\n\t\t\t);\n\t\t\tbuildRules = (\n\t\t\t);\n\t\t\tdependencies = (\n\t\t\t\t0649AB3F16E9200D001323D2 /* PBXTargetDependency */,\n\t\t\t);\n\t\t\tname = DFURTSPPlayerTests;\n\t\t\tproductName = DFURTSPPlayerTests;\n\t\t\tproductReference = 0649AB3916E9200D001323D2 /* DFURTSPPlayerTests.octest */;\n\t\t\tproductType = \"com.apple.product-type.bundle.ocunit-test\";\n\t\t};\n/* End PBXNativeTarget section */\n\n/* Begin PBXProject section */\n\t\t0649AB0816E9200D001323D2 /* Project object */ = {\n\t\t\tisa = PBXProject;\n\t\t\tattributes = {\n\t\t\t\tCLASSPREFIX = DFU;\n\t\t\t\tLastUpgradeCheck = 0460;\n\t\t\t\tORGANIZATIONNAME = \"Bogdan Furdui\";\n\t\t\t};\n\t\t\tbuildConfigurationList = 0649AB0B16E9200D001323D2 /* Build configuration list for PBXProject \"DFURTSPPlayer\" */;\n\t\t\tcompatibilityVersion = \"Xcode 3.2\";\n\t\t\tdevelopmentRegion = English;\n\t\t\thasScannedForEncodings = 0;\n\t\t\tknownRegions = (\n\t\t\t\ten,\n\t\t\t);\n\t\t\tmainGroup = 0649AB0716E9200D001323D2;\n\t\t\tproductRefGroup = 0649AB1116E9200D001323D2 /* Products */;\n\t\t\tprojectDirPath = \"\";\n\t\t\tprojectRoot = \"\";\n\t\t\ttargets = (\n\t\t\t\t0649AB0F16E9200D001323D2 /* DFURTSPPlayer */,\n\t\t\t\t0649AB3816E9200D001323D2 /* DFURTSPPlayerTests */,\n\t\t\t);\n\t\t};\n/* End PBXProject section 
*/\n\n/* Begin PBXResourcesBuildPhase section */\n\t\t0649AB0E16E9200D001323D2 /* Resources */ = {\n\t\t\tisa = PBXResourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t0649AB1E16E9200D001323D2 /* InfoPlist.strings in Resources */,\n\t\t\t\t0649AB3016E9200D001323D2 /* DFUViewController_iPhone.xib in Resources */,\n\t\t\t\t0649AB3316E9200D001323D2 /* DFUViewController_iPad.xib in Resources */,\n\t\t\t\t0649AB5516E92055001323D2 /* Default-568h@2x.png in Resources */,\n\t\t\t\t0649AB5616E92055001323D2 /* Default.png in Resources */,\n\t\t\t\t0649AB5716E92055001323D2 /* Default@2x.png in Resources */,\n\t\t\t\t0649ABE116E93227001323D2 /* sophie.mov in Resources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t0649AB3616E9200D001323D2 /* Resources */ = {\n\t\t\tisa = PBXResourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t0649AB4516E9200D001323D2 /* InfoPlist.strings in Resources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXResourcesBuildPhase section */\n\n/* Begin PBXShellScriptBuildPhase section */\n\t\t0649AB3716E9200D001323D2 /* ShellScript */ = {\n\t\t\tisa = PBXShellScriptBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t);\n\t\t\tinputPaths = (\n\t\t\t);\n\t\t\toutputPaths = (\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t\tshellPath = /bin/sh;\n\t\t\tshellScript = \"# Run the unit tests in this test bundle.\\n\\\"${SYSTEM_DEVELOPER_DIR}/Tools/RunUnitTests\\\"\\n\";\n\t\t};\n/* End PBXShellScriptBuildPhase section */\n\n/* Begin PBXSourcesBuildPhase section */\n\t\t0649AB0C16E9200D001323D2 /* Sources */ = {\n\t\t\tisa = PBXSourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t0649AB2016E9200D001323D2 /* main.m in Sources */,\n\t\t\t\t0649AB2416E9200D001323D2 /* DFUAppDelegate.m in Sources */,\n\t\t\t\t0649AB2D16E9200D001323D2 /* DFUViewController.m in Sources 
*/,\n\t\t\t\t0649ABDB16E93103001323D2 /* AudioStreamer.m in Sources */,\n\t\t\t\t0649ABDC16E93103001323D2 /* Utilities.m in Sources */,\n\t\t\t\t0649ABDD16E93103001323D2 /* RTSPPlayer.m in Sources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n\t\t0649AB3416E9200D001323D2 /* Sources */ = {\n\t\t\tisa = PBXSourcesBuildPhase;\n\t\t\tbuildActionMask = 2147483647;\n\t\t\tfiles = (\n\t\t\t\t0649AB4816E9200D001323D2 /* DFURTSPPlayerTests.m in Sources */,\n\t\t\t);\n\t\t\trunOnlyForDeploymentPostprocessing = 0;\n\t\t};\n/* End PBXSourcesBuildPhase section */\n\n/* Begin PBXTargetDependency section */\n\t\t0649AB3F16E9200D001323D2 /* PBXTargetDependency */ = {\n\t\t\tisa = PBXTargetDependency;\n\t\t\ttarget = 0649AB0F16E9200D001323D2 /* DFURTSPPlayer */;\n\t\t\ttargetProxy = 0649AB3E16E9200D001323D2 /* PBXContainerItemProxy */;\n\t\t};\n/* End PBXTargetDependency section */\n\n/* Begin PBXVariantGroup section */\n\t\t0649AB1C16E9200D001323D2 /* InfoPlist.strings */ = {\n\t\t\tisa = PBXVariantGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB1D16E9200D001323D2 /* en */,\n\t\t\t);\n\t\t\tname = InfoPlist.strings;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB2E16E9200D001323D2 /* DFUViewController_iPhone.xib */ = {\n\t\t\tisa = PBXVariantGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB2F16E9200D001323D2 /* en */,\n\t\t\t);\n\t\t\tname = DFUViewController_iPhone.xib;\n\t\t\tpath = ..;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB3116E9200D001323D2 /* DFUViewController_iPad.xib */ = {\n\t\t\tisa = PBXVariantGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB3216E9200D001323D2 /* en */,\n\t\t\t);\n\t\t\tname = DFUViewController_iPad.xib;\n\t\t\tpath = ..;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n\t\t0649AB4316E9200D001323D2 /* InfoPlist.strings */ = {\n\t\t\tisa = PBXVariantGroup;\n\t\t\tchildren = (\n\t\t\t\t0649AB4416E9200D001323D2 /* en */,\n\t\t\t);\n\t\t\tname = InfoPlist.strings;\n\t\t\tsourceTree = \"<group>\";\n\t\t};\n/* End PBXVariantGroup section 
*/\n\n/* Begin XCBuildConfiguration section */\n\t\t0649AB4916E9200D001323D2 /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tCLANG_CXX_LANGUAGE_STANDARD = \"gnu++0x\";\n\t\t\t\tCLANG_CXX_LIBRARY = \"libc++\";\n\t\t\t\tCLANG_WARN_CONSTANT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_EMPTY_BODY = YES;\n\t\t\t\tCLANG_WARN_ENUM_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_INT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN__DUPLICATE_METHOD_MATCH = YES;\n\t\t\t\t\"CODE_SIGN_IDENTITY[sdk=iphoneos*]\" = \"iPhone Developer\";\n\t\t\t\tCOPY_PHASE_STRIP = NO;\n\t\t\t\tGCC_C_LANGUAGE_STANDARD = gnu99;\n\t\t\t\tGCC_DYNAMIC_NO_PIC = NO;\n\t\t\t\tGCC_OPTIMIZATION_LEVEL = 0;\n\t\t\t\tGCC_PREPROCESSOR_DEFINITIONS = (\n\t\t\t\t\t\"DEBUG=1\",\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t);\n\t\t\t\tGCC_SYMBOLS_PRIVATE_EXTERN = NO;\n\t\t\t\tGCC_WARN_ABOUT_RETURN_TYPE = YES;\n\t\t\t\tGCC_WARN_UNINITIALIZED_AUTOS = YES;\n\t\t\t\tGCC_WARN_UNUSED_VARIABLE = YES;\n\t\t\t\tIPHONEOS_DEPLOYMENT_TARGET = 8.0;\n\t\t\t\tONLY_ACTIVE_ARCH = YES;\n\t\t\t\tSDKROOT = iphoneos;\n\t\t\t\tTARGETED_DEVICE_FAMILY = \"1,2\";\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t0649AB4A16E9200D001323D2 /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tCLANG_CXX_LANGUAGE_STANDARD = \"gnu++0x\";\n\t\t\t\tCLANG_CXX_LIBRARY = \"libc++\";\n\t\t\t\tCLANG_WARN_CONSTANT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_EMPTY_BODY = YES;\n\t\t\t\tCLANG_WARN_ENUM_CONVERSION = YES;\n\t\t\t\tCLANG_WARN_INT_CONVERSION = YES;\n\t\t\t\tCLANG_WARN__DUPLICATE_METHOD_MATCH = YES;\n\t\t\t\t\"CODE_SIGN_IDENTITY[sdk=iphoneos*]\" = \"iPhone Developer\";\n\t\t\t\tCOPY_PHASE_STRIP = YES;\n\t\t\t\tGCC_C_LANGUAGE_STANDARD = gnu99;\n\t\t\t\tGCC_WARN_ABOUT_RETURN_TYPE = YES;\n\t\t\t\tGCC_WARN_UNINITIALIZED_AUTOS = YES;\n\t\t\t\tGCC_WARN_UNUSED_VARIABLE = YES;\n\t\t\t\tIPHONEOS_DEPLOYMENT_TARGET = 8.0;\n\t\t\t\tOTHER_CFLAGS = 
\"-DNS_BLOCK_ASSERTIONS=1\";\n\t\t\t\tSDKROOT = iphoneos;\n\t\t\t\tTARGETED_DEVICE_FAMILY = \"1,2\";\n\t\t\t\tVALIDATE_PRODUCT = YES;\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n\t\t0649AB4C16E9200D001323D2 /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tGCC_PRECOMPILE_PREFIX_HEADER = YES;\n\t\t\t\tGCC_PREFIX_HEADER = \"DFURTSPPlayer/DFURTSPPlayer-Prefix.pch\";\n\t\t\t\tHEADER_SEARCH_PATHS = (\n\t\t\t\t\t\"$(SRCROOT)/FFMpegiOS/include/**\",\n\t\t\t\t\t\"$(PROJECT_DIR)/DFURTSPPlayer/FFMpegiOS/include\",\n\t\t\t\t);\n\t\t\t\tINFOPLIST_FILE = \"DFURTSPPlayer/DFURTSPPlayer-Info.plist\";\n\t\t\t\tLIBRARY_SEARCH_PATHS = (\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t\t\"\\\"$(SRCROOT)/DFURTSPPlayer/FFMpegiOS/lib\\\"\",\n\t\t\t\t\t\"$(PROJECT_DIR)/DFURTSPPlayer/FFMpegiOS/lib\",\n\t\t\t\t);\n\t\t\t\tOTHER_LDFLAGS = (\n\t\t\t\t\t\"-all_load\",\n\t\t\t\t\t\"-ObjC\",\n\t\t\t\t);\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tUSER_HEADER_SEARCH_PATHS = \"\";\n\t\t\t\tWRAPPER_EXTENSION = app;\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t0649AB4D16E9200D001323D2 /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tALWAYS_SEARCH_USER_PATHS = NO;\n\t\t\t\tGCC_PRECOMPILE_PREFIX_HEADER = YES;\n\t\t\t\tGCC_PREFIX_HEADER = \"DFURTSPPlayer/DFURTSPPlayer-Prefix.pch\";\n\t\t\t\tHEADER_SEARCH_PATHS = (\n\t\t\t\t\t\"$(SRCROOT)/FFMpegiOS/include/**\",\n\t\t\t\t\t\"$(PROJECT_DIR)/DFURTSPPlayer/FFMpegiOS/include\",\n\t\t\t\t);\n\t\t\t\tINFOPLIST_FILE = \"DFURTSPPlayer/DFURTSPPlayer-Info.plist\";\n\t\t\t\tLIBRARY_SEARCH_PATHS = (\n\t\t\t\t\t\"$(inherited)\",\n\t\t\t\t\t\"\\\"$(SRCROOT)/DFURTSPPlayer/FFMpegiOS/lib\\\"\",\n\t\t\t\t\t\"$(PROJECT_DIR)/DFURTSPPlayer/FFMpegiOS/lib\",\n\t\t\t\t);\n\t\t\t\tOTHER_LDFLAGS = (\n\t\t\t\t\t\"-all_load\",\n\t\t\t\t\t\"-ObjC\",\n\t\t\t\t);\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tUSER_HEADER_SEARCH_PATHS = \"\";\n\t\t\t\tWRAPPER_EXTENSION = 
app;\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n\t\t0649AB4F16E9200D001323D2 /* Debug */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tBUNDLE_LOADER = \"$(BUILT_PRODUCTS_DIR)/DFURTSPPlayer.app/DFURTSPPlayer\";\n\t\t\t\tFRAMEWORK_SEARCH_PATHS = (\n\t\t\t\t\t\"\\\"$(SDKROOT)/Developer/Library/Frameworks\\\"\",\n\t\t\t\t\t\"\\\"$(DEVELOPER_LIBRARY_DIR)/Frameworks\\\"\",\n\t\t\t\t);\n\t\t\t\tGCC_PRECOMPILE_PREFIX_HEADER = YES;\n\t\t\t\tGCC_PREFIX_HEADER = \"DFURTSPPlayer/DFURTSPPlayer-Prefix.pch\";\n\t\t\t\tINFOPLIST_FILE = \"DFURTSPPlayerTests/DFURTSPPlayerTests-Info.plist\";\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tTEST_HOST = \"$(BUNDLE_LOADER)\";\n\t\t\t\tWRAPPER_EXTENSION = octest;\n\t\t\t};\n\t\t\tname = Debug;\n\t\t};\n\t\t0649AB5016E9200D001323D2 /* Release */ = {\n\t\t\tisa = XCBuildConfiguration;\n\t\t\tbuildSettings = {\n\t\t\t\tBUNDLE_LOADER = \"$(BUILT_PRODUCTS_DIR)/DFURTSPPlayer.app/DFURTSPPlayer\";\n\t\t\t\tFRAMEWORK_SEARCH_PATHS = (\n\t\t\t\t\t\"\\\"$(SDKROOT)/Developer/Library/Frameworks\\\"\",\n\t\t\t\t\t\"\\\"$(DEVELOPER_LIBRARY_DIR)/Frameworks\\\"\",\n\t\t\t\t);\n\t\t\t\tGCC_PRECOMPILE_PREFIX_HEADER = YES;\n\t\t\t\tGCC_PREFIX_HEADER = \"DFURTSPPlayer/DFURTSPPlayer-Prefix.pch\";\n\t\t\t\tINFOPLIST_FILE = \"DFURTSPPlayerTests/DFURTSPPlayerTests-Info.plist\";\n\t\t\t\tPRODUCT_NAME = \"$(TARGET_NAME)\";\n\t\t\t\tTEST_HOST = \"$(BUNDLE_LOADER)\";\n\t\t\t\tWRAPPER_EXTENSION = octest;\n\t\t\t};\n\t\t\tname = Release;\n\t\t};\n/* End XCBuildConfiguration section */\n\n/* Begin XCConfigurationList section */\n\t\t0649AB0B16E9200D001323D2 /* Build configuration list for PBXProject \"DFURTSPPlayer\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t0649AB4916E9200D001323D2 /* Debug */,\n\t\t\t\t0649AB4A16E9200D001323D2 /* Release */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n\t\t0649AB4B16E9200D001323D2 /* Build configuration list 
for PBXNativeTarget \"DFURTSPPlayer\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t0649AB4C16E9200D001323D2 /* Debug */,\n\t\t\t\t0649AB4D16E9200D001323D2 /* Release */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n\t\t0649AB4E16E9200D001323D2 /* Build configuration list for PBXNativeTarget \"DFURTSPPlayerTests\" */ = {\n\t\t\tisa = XCConfigurationList;\n\t\t\tbuildConfigurations = (\n\t\t\t\t0649AB4F16E9200D001323D2 /* Debug */,\n\t\t\t\t0649AB5016E9200D001323D2 /* Release */,\n\t\t\t);\n\t\t\tdefaultConfigurationIsVisible = 0;\n\t\t\tdefaultConfigurationName = Release;\n\t\t};\n/* End XCConfigurationList section */\n\t};\n\trootObject = 0649AB0816E9200D001323D2 /* Project object */;\n}\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayerTests/DFURTSPPlayerTests-Info.plist",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\t<key>CFBundleDevelopmentRegion</key>\n\t<string>en</string>\n\t<key>CFBundleExecutable</key>\n\t<string>${EXECUTABLE_NAME}</string>\n\t<key>CFBundleIdentifier</key>\n\t<string>com.clujtech.ro.${PRODUCT_NAME:rfc1034identifier}</string>\n\t<key>CFBundleInfoDictionaryVersion</key>\n\t<string>6.0</string>\n\t<key>CFBundlePackageType</key>\n\t<string>BNDL</string>\n\t<key>CFBundleShortVersionString</key>\n\t<string>1.0</string>\n\t<key>CFBundleSignature</key>\n\t<string>????</string>\n\t<key>CFBundleVersion</key>\n\t<string>1</string>\n</dict>\n</plist>\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayerTests/DFURTSPPlayerTests.h",
    "content": "//\n//  DFURTSPPlayerTests.h\n//  DFURTSPPlayerTests\n//\n//  Created by Bogdan Furdui on 3/7/13.\n//  Copyright (c) 2013 Bogdan Furdui. All rights reserved.\n//\n\n#import <SenTestingKit/SenTestingKit.h>\n\n@interface DFURTSPPlayerTests : SenTestCase\n\n@end\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayerTests/DFURTSPPlayerTests.m",
    "content": "//\n//  DFURTSPPlayerTests.m\n//  DFURTSPPlayerTests\n//\n//  Created by Bogdan Furdui on 3/7/13.\n//  Copyright (c) 2013 Bogdan Furdui. All rights reserved.\n//\n\n#import \"DFURTSPPlayerTests.h\"\n\n@implementation DFURTSPPlayerTests\n\n- (void)setUp\n{\n    [super setUp];\n    \n    // Set-up code here.\n}\n\n- (void)tearDown\n{\n    // Tear-down code here.\n    \n    [super tearDown];\n}\n\n- (void)testExample\n{\n    STFail(@\"Unit tests are not implemented yet in DFURTSPPlayerTests\");\n}\n\n@end\n"
  },
  {
    "path": "DFURTSPPlayer/DFURTSPPlayerTests/en.lproj/InfoPlist.strings",
    "content": "/* Localized versions of Info.plist keys */\n\n"
  },
  {
    "path": "README.md",
    "content": "DFURTSPPlayer\n=============\n\nAn RTSP player for iOS built with FFmpeg.\nThis project uses slightly modified classes from the \"mooncatventures-group\" project (https://github.com/mooncatventures-group).\n"
  }
]